split out proxmox-backup-client binary
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
proxmox-backup-client/Cargo.toml (new file, 34 lines)
@@ -0,0 +1,34 @@
[package]
name = "proxmox-backup-client"
version = "0.1.0"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"

[dependencies]
anyhow = "1.0"
futures = "0.3"
hyper = { version = "0.14", features = [ "full" ] }
libc = "0.2"
nix = "0.19.1"
openssl = "0.10"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.6", features = [ "rt", "rt-multi-thread" ] }
tokio-stream = "0.1.0"
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
xdg = "2.2"
zstd = { version = "0.6", features = [ "bindgen" ] }

pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }

proxmox = { version = "0.13.0", features = [ "sortable-macro", "api-macro", "cli", "router" ] }

pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-client = { path = "../pbs-client" }
pbs-datastore = { path = "../pbs-datastore" }
pbs-fuse-loop = { path = "../pbs-fuse-loop" }
pbs-runtime = { path = "../pbs-runtime" }
pbs-systemd = { path = "../pbs-systemd" }
pbs-tools = { path = "../pbs-tools" }
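Note that tokio is pulled in with only the "rt" and "rt-multi-thread" features; the async entry points run on a runtime provided by the workspace-local pbs-runtime crate (see the pbs_runtime::main calls in mount.rs below). A minimal sketch of what such a block-on helper can look like under exactly these two features, as an illustration rather than the actual pbs-runtime implementation:

use std::future::Future;

// Minimal sketch of a block-on helper that only needs the "rt" and
// "rt-multi-thread" features listed above. The actual pbs-runtime
// crate is not shown in this commit and may do more, e.g. enable the
// I/O and timer drivers, which need extra tokio features.
pub fn main<F: Future>(fut: F) -> F::Output {
    tokio::runtime::Builder::new_multi_thread()
        .build()
        .expect("failed to create tokio runtime")
        .block_on(fut)
}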
proxmox-backup-client/src/benchmark.rs (new file, 364 lines)
@@ -0,0 +1,364 @@
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::Error;
use serde_json::Value;
use serde::Serialize;

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::{
    api,
    cli::{
        OUTPUT_FORMAT,
        ColumnConfig,
        get_output_format,
        format_and_print_result_full,
        default_table_format_options,
    },
    router::ReturnType,
    schema::ApiType,
};

use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use pbs_datastore::{CryptConfig, KeyDerivationConfig, load_and_decrypt_key};
use pbs_datastore::data_blob::DataChunkBuilder;

use crate::{
    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
    extract_repository_from_value,
    record_repository,
    connect,
};

#[api()]
#[derive(Copy, Clone, Serialize)]
/// Speed test result
struct Speed {
    /// The measured speed in Bytes/second
    #[serde(skip_serializing_if="Option::is_none")]
    speed: Option<f64>,
    /// Top result we want to compare with
    top: f64,
}

#[api(
    properties: {
        "tls": {
            type: Speed,
        },
        "sha256": {
            type: Speed,
        },
        "compress": {
            type: Speed,
        },
        "decompress": {
            type: Speed,
        },
        "aes256_gcm": {
            type: Speed,
        },
        "verify": {
            type: Speed,
        },
    },
)]
#[derive(Copy, Clone, Serialize)]
/// Benchmark Results
struct BenchmarkResult {
    /// TLS upload speed
    tls: Speed,
    /// SHA256 checksum computation speed
    sha256: Speed,
    /// ZStd level 1 compression speed
    compress: Speed,
    /// ZStd level 1 decompression speed
    decompress: Speed,
    /// AES256 GCM encryption speed
    aes256_gcm: Speed,
    /// Verify speed
    verify: Speed,
}

static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
    tls: Speed {
        speed: None,
        top: 1_000_000.0 * 1235.0, // TLS to localhost, AMD Ryzen 7 2700X
    },
    sha256: Speed {
        speed: None,
        top: 1_000_000.0 * 2022.0, // AMD Ryzen 7 2700X
    },
    compress: Speed {
        speed: None,
        top: 1_000_000.0 * 752.0, // AMD Ryzen 7 2700X
    },
    decompress: Speed {
        speed: None,
        top: 1_000_000.0 * 1198.0, // AMD Ryzen 7 2700X
    },
    aes256_gcm: Speed {
        speed: None,
        top: 1_000_000.0 * 3645.0, // AMD Ryzen 7 2700X
    },
    verify: Speed {
        speed: None,
        top: 1_000_000.0 * 758.0, // AMD Ryzen 7 2700X
    },
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            verbose: {
                description: "Verbose output.",
                type: bool,
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Run benchmark tests
pub async fn benchmark(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param).ok();

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let output_format = get_output_format(&param);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let mut benchmark_result = BENCHMARK_RESULT_2020_TOP;

    // do repo tests first, because this may prompt for a password
    if let Some(repo) = repo {
        test_upload_speed(&mut benchmark_result, repo, crypt_config.clone(), verbose).await?;
    }

    test_crypt_speed(&mut benchmark_result, verbose)?;

    render_result(&output_format, &benchmark_result)?;

    Ok(())
}

// print comparison table
fn render_result(
    output_format: &str,
    benchmark_result: &BenchmarkResult,
) -> Result<(), Error> {

    let mut data = serde_json::to_value(benchmark_result)?;
    let return_type = ReturnType::new(false, &BenchmarkResult::API_SCHEMA);

    let render_speed = |value: &Value, _record: &Value| -> Result<String, Error> {
        match value["speed"].as_f64() {
            None => Ok(String::from("not tested")),
            Some(speed) => {
                let top = value["top"].as_f64().unwrap();
                Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
            }
        }
    };

    let options = default_table_format_options()
        .column(ColumnConfig::new("tls")
            .header("TLS (maximal backup upload speed)")
            .right_align(false).renderer(render_speed))
        .column(ColumnConfig::new("sha256")
            .header("SHA256 checksum computation speed")
            .right_align(false).renderer(render_speed))
        .column(ColumnConfig::new("compress")
            .header("ZStd level 1 compression speed")
            .right_align(false).renderer(render_speed))
        .column(ColumnConfig::new("decompress")
            .header("ZStd level 1 decompression speed")
            .right_align(false).renderer(render_speed))
        .column(ColumnConfig::new("verify")
            .header("Chunk verification speed")
            .right_align(false).renderer(render_speed))
        .column(ColumnConfig::new("aes256_gcm")
            .header("AES256 GCM encryption speed")
            .right_align(false).renderer(render_speed));

    format_and_print_result_full(&mut data, &return_type, output_format, &options);

    Ok(())
}

async fn test_upload_speed(
    benchmark_result: &mut BenchmarkResult,
    repo: BackupRepository,
    crypt_config: Option<Arc<CryptConfig>>,
    verbose: bool,
) -> Result<(), Error> {

    let backup_time = proxmox::tools::time::epoch_i64();

    let client = connect(&repo)?;
    record_repository(&repo);

    if verbose { eprintln!("Connecting to backup server"); }
    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        "host",
        "benchmark",
        backup_time,
        false,
        true
    ).await?;

    if verbose { eprintln!("Start TLS speed test"); }
    let speed = client.upload_speedtest(verbose).await?;

    eprintln!("TLS speed: {:.2} MB/s", speed/1_000_000.0);

    benchmark_result.tls.speed = Some(speed);

    Ok(())
}

// test hash/crypt/compress speed
fn test_crypt_speed(
    benchmark_result: &mut BenchmarkResult,
    _verbose: bool,
) -> Result<(), Error> {

    let pw = b"test";

    let kdf = KeyDerivationConfig::Scrypt {
        n: 65536,
        r: 8,
        p: 1,
        salt: Vec::new(),
    };

    let testkey = kdf.derive_key(pw)?;

    let crypt_config = CryptConfig::new(testkey)?;

    //let random_data = proxmox::sys::linux::random_data(1024*1024)?;
    let mut random_data = vec![];
    // generate pseudo random byte sequence
    for i in 0..256*1024 {
        for j in 0..4 {
            let byte = ((i >> (j<<3))&0xff) as u8;
            random_data.push(byte);
        }
    }

    assert_eq!(random_data.len(), 1024*1024);

    let start_time = std::time::Instant::now();

    let mut bytes = 0;
    loop {
        openssl::sha::sha256(&random_data);
        bytes += random_data.len();
        if start_time.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.sha256.speed = Some(speed);

    eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);

    let start_time = std::time::Instant::now();

    let mut bytes = 0;
    loop {
        let mut reader = &random_data[..];
        zstd::stream::encode_all(&mut reader, 1)?;
        bytes += random_data.len();
        if start_time.elapsed().as_micros() > 3_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.compress.speed = Some(speed);

    eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);

    let start_time = std::time::Instant::now();

    let compressed_data = {
        let mut reader = &random_data[..];
        zstd::stream::encode_all(&mut reader, 1)?
    };

    let mut bytes = 0;
    loop {
        let mut reader = &compressed_data[..];
        let data = zstd::stream::decode_all(&mut reader)?;
        bytes += data.len();
        if start_time.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.decompress.speed = Some(speed);

    eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);

    let start_time = std::time::Instant::now();

    let mut bytes = 0;
    loop {
        let mut out = Vec::new();
        crypt_config.encrypt_to(&random_data, &mut out)?;
        bytes += random_data.len();
        if start_time.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.aes256_gcm.speed = Some(speed);

    eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);

    let start_time = std::time::Instant::now();

    let (chunk, digest) = DataChunkBuilder::new(&random_data)
        .compress(true)
        .build()?;

    let mut bytes = 0;
    loop {
        chunk.verify_unencrypted(random_data.len(), &digest)?;
        bytes += random_data.len();
        if start_time.elapsed().as_micros() > 1_000_000 { break; }
    }
    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
    benchmark_result.verify.speed = Some(speed);

    eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);

    Ok(())
}
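All local measurements in test_crypt_speed() share one pattern: run the operation over a fixed 1 MiB buffer in a loop until at least one second (three for compression) has elapsed, then divide the bytes processed by the elapsed wall-clock time. Factored out as a generic helper it would look roughly like this; the helper name and the stand-in operation are illustrative, not part of the crate:

use std::time::Instant;

// Illustrative helper, not crate API: run `op` over `data` repeatedly
// for at least `seconds` and return the throughput in bytes/second.
fn measure_throughput<F: FnMut(&[u8])>(data: &[u8], seconds: f64, mut op: F) -> f64 {
    let start = Instant::now();
    let mut bytes = 0usize;
    loop {
        op(data);
        bytes += data.len();
        if start.elapsed().as_secs_f64() > seconds {
            break;
        }
    }
    bytes as f64 / start.elapsed().as_secs_f64()
}

fn main() {
    let data = vec![0u8; 1024 * 1024];
    // Trivial stand-in operation; the real code plugs in sha256,
    // zstd encode/decode, AES256-GCM encryption and chunk verification.
    let speed = measure_throughput(&data, 1.0, |buf| {
        let _ = buf.iter().fold(0u8, |acc, b| acc ^ b);
    });
    println!("{:.2} MB/s", speed / 1_000_000.0);
}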
proxmox-backup-client/src/catalog.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
use std::os::unix::fs::OpenOptionsExt;
use std::io::{Seek, SeekFrom};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*};

use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::json::required_string_param;

use crate::{
    REPO_URL_SCHEMA,
    KEYFD_SCHEMA,
    extract_repository_from_value,
    format_key_source,
    record_repository,
    decrypt_key,
    api_datastore_latest_snapshot,
    complete_repository,
    complete_backup_snapshot,
    complete_group_or_snapshot,
    complete_pxar_archive_name,
    connect,
    crypto_parameters,
    BackupDir,
    BackupGroup,
    BufferedDynamicReader,
    BufferedDynamicReadAt,
    CatalogReader,
    CATALOG_NAME,
    CryptConfig,
    DynamicIndexReader,
    IndexFile,
    Shell,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
            let (key, _created, _fingerprint) = decrypt_key(&key.key, &get_encryption_key_password)
                .map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let client = connect(&repo)?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let (manifest, _) = client.download_manifest().await?;
    manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "snapshot": {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            "repository": {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(&repo)?;
    let path = required_string_param(&param, "snapshot")?;
    let archive_name = required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
            let (key, _created, _fingerprint) = decrypt_key(&key.key, &get_encryption_key_password)
                .map_err(|err| {
                    eprintln!("{}", format_key_source(&key.source, "encryption"));
                    err
                })?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let (manifest, _) = client.download_manifest().await?;
    manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&server_archive_name)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: pbs_client::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;

    client.download(CATALOG_NAME, &mut tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}

pub fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}
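Both commands above stage the downloaded catalog in an anonymous temporary file: opening a directory with O_TMPFILE yields an unlinked file that disappears as soon as the last handle is dropped, so no cleanup is needed even if the client crashes between the copy and the read-back. A self-contained, Linux-only sketch of the pattern used here:

use std::io::{Read, Seek, SeekFrom, Write};
use std::os::unix::fs::OpenOptionsExt;

fn main() -> std::io::Result<()> {
    // Open an anonymous file in /tmp: it has no name on disk and is
    // reclaimed automatically once the handle goes out of scope.
    let mut tmp = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    tmp.write_all(b"staged data")?;
    // Rewind before reading back, just like the catalog code does.
    tmp.seek(SeekFrom::Start(0))?;
    let mut buf = String::new();
    tmp.read_to_string(&mut buf)?;
    assert_eq!(buf, "staged data");
    Ok(())
}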
proxmox-backup-client/src/key.rs (new file, 495 lines)
@@ -0,0 +1,495 @@
use std::convert::TryFrom;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::api::api;
use proxmox::api::cli::{
    format_and_print_result_full, get_output_format, CliCommand, CliCommandMap, ColumnConfig,
    OUTPUT_FORMAT,
};
use proxmox::api::router::ReturnType;
use proxmox::api::schema::ApiType;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};

use pbs_api_types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA};
use pbs_datastore::{KeyConfig, KeyInfo, Kdf, rsa_decrypt_key_config};
use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
use pbs_client::tools::key_source::{
    find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
    place_default_encryption_key, place_default_master_pubkey,
};

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description:
                    "Output file. Without this the key will become the new default encryption key.",
                optional: true,
            },
            hint: {
                schema: PASSWORD_HINT_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Create a new encryption key.
fn create(kdf: Option<Kdf>, path: Option<String>, hint: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => {
            let path = place_default_encryption_key()?;
            println!("creating default key at: {:?}", path);
            path
        }
    };

    let kdf = kdf.unwrap_or_default();

    let mut key = [0u8; 32];
    proxmox::sys::linux::fill_with_random_data(&mut key)?;

    match kdf {
        Kdf::None => {
            if hint.is_some() {
                bail!("password hint not allowed for Kdf::None");
            }

            let key_config = KeyConfig::without_password(key)?;

            key_config.store(path, false)?;
        }
        Kdf::Scrypt | Kdf::PBKDF2 => {
            // always read passphrase from tty
            if !tty::stdin_isatty() {
                bail!("unable to read passphrase - no tty");
            }

            let password = tty::read_and_verify_password("Encryption Key Password: ")?;

            let mut key_config = KeyConfig::with_key(&key, &password, kdf)?;
            key_config.hint = hint;

            key_config.store(&path, false)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            "master-keyfile": {
                description: "(Private) master key to use.",
            },
            "encrypted-keyfile": {
                description: "RSA-encrypted keyfile to import.",
            },
            kdf: {
                type: Kdf,
                optional: true,
            },
            "path": {
                description:
                    "Output file. Without this the key will become the new default encryption key.",
                optional: true,
            },
            hint: {
                schema: PASSWORD_HINT_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Import an encrypted backup of an encryption key using a (private) master key.
async fn import_with_master_key(
    master_keyfile: String,
    encrypted_keyfile: String,
    kdf: Option<Kdf>,
    path: Option<String>,
    hint: Option<String>,
) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => {
            let path = place_default_encryption_key()?;
            if path.exists() {
                bail!("Please remove default encryption key at {:?} before importing to default location (or choose a non-default one).", path);
            }
            println!("Importing key to default location at: {:?}", path);
            path
        }
    };

    let encrypted_key = file_get_contents(&encrypted_keyfile)?;
    let master_key = file_get_contents(&master_keyfile)?;
    let password = tty::read_password("Master Key Password: ")?;

    let master_key = openssl::pkey::PKey::private_key_from_pem_passphrase(&master_key, &password)
        .map_err(|err| format_err!("failed to read PEM-formatted private key - {}", err))?
        .rsa()
        .map_err(|err| format_err!("not a valid private RSA key - {}", err))?;

    let (key, created, _fingerprint) =
        rsa_decrypt_key_config(master_key, &encrypted_key, &get_encryption_key_password)?;

    let kdf = kdf.unwrap_or_default();
    match kdf {
        Kdf::None => {
            if hint.is_some() {
                bail!("password hint not allowed for Kdf::None");
            }

            let mut key_config = KeyConfig::without_password(key)?;
            key_config.created = created; // keep original value

            key_config.store(path, true)?;
        }
        Kdf::Scrypt | Kdf::PBKDF2 => {
            let password = tty::read_and_verify_password("New Password: ")?;

            let mut new_key_config = KeyConfig::with_key(&key, &password, kdf)?;
            new_key_config.created = created; // keep original value
            new_key_config.hint = hint;

            new_key_config.store(path, true)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description: "Key file. Without this the default key's password will be changed.",
                optional: true,
            },
            hint: {
                schema: PASSWORD_HINT_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Change the encryption key's password.
fn change_passphrase(
    kdf: Option<Kdf>,
    path: Option<String>,
    hint: Option<String>,
) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => {
            let path = find_default_encryption_key()?.ok_or_else(|| {
                format_err!("no encryption file provided and no default file found")
            })?;
            println!("updating default key at: {:?}", path);
            path
        }
    };

    let kdf = kdf.unwrap_or_default();

    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let key_config = KeyConfig::load(&path)?;
    let (key, created, _fingerprint) = key_config.decrypt(&get_encryption_key_password)?;

    match kdf {
        Kdf::None => {
            if hint.is_some() {
                bail!("password hint not allowed for Kdf::None");
            }

            let mut key_config = KeyConfig::without_password(key)?;
            key_config.created = created; // keep original value

            key_config.store(&path, true)?;
        }
        Kdf::Scrypt | Kdf::PBKDF2 => {
            let password = tty::read_and_verify_password("New Password: ")?;

            let mut new_key_config = KeyConfig::with_key(&key, &password, kdf)?;
            new_key_config.created = created; // keep original value
            new_key_config.hint = hint;

            new_key_config.store(&path, true)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Key file. Without this the default key's metadata will be shown.",
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Print the encryption key's metadata.
fn show_key(path: Option<String>, param: Value) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => find_default_encryption_key()?
            .ok_or_else(|| format_err!("no encryption file provided and no default file found"))?,
    };

    let config: KeyConfig = serde_json::from_slice(&file_get_contents(path.clone())?)?;

    let output_format = get_output_format(&param);

    let mut info: KeyInfo = (&config).into();
    info.path = Some(format!("{:?}", path));

    let options = proxmox::api::cli::default_table_format_options()
        .column(ColumnConfig::new("path"))
        .column(ColumnConfig::new("kdf"))
        .column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch))
        .column(ColumnConfig::new("modified").renderer(pbs_tools::format::render_epoch))
        .column(ColumnConfig::new("fingerprint"))
        .column(ColumnConfig::new("hint"));

    let return_type = ReturnType::new(false, &KeyInfo::API_SCHEMA);

    format_and_print_result_full(
        &mut serde_json::to_value(info)?,
        &return_type,
        &output_format,
        &options,
    );

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Path to the PEM formatted RSA public key.",
            },
        },
    },
)]
/// Import an RSA public key used to put an encrypted version of the symmetric backup encryption
/// key onto the backup server along with each backup.
///
/// The imported key will be used as default master key for future invocations by the same local
/// user.
fn import_master_pubkey(path: String) -> Result<(), Error> {
    let pem_data = file_get_contents(&path)?;

    match openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        Ok(key) => {
            let info = RsaPubKeyInfo::try_from(key.rsa()?)?;
            println!("Found following key at {:?}", path);
            println!("Modulus: {}", info.modulus);
            println!("Exponent: {}", info.exponent);
            println!("Length: {}", info.length);
        }
        Err(err) => bail!("Unable to decode PEM data - {}", err),
    };

    let target_path = place_default_master_pubkey()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(())
}

#[api]
/// Create an RSA public/private key pair used to put an encrypted version of the symmetric backup
/// encryption key onto the backup server along with each backup.
fn create_master_key() -> Result<(), Error> {
    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let bits = 4096;
    println!("Generating {}-bit RSA key..", bits);
    let rsa = openssl::rsa::Rsa::generate(bits)?;
    let public =
        openssl::rsa::Rsa::from_public_components(rsa.n().to_owned()?, rsa.e().to_owned()?)?;
    let info = RsaPubKeyInfo::try_from(public)?;
    println!("Modulus: {}", info.modulus);
    println!("Exponent: {}", info.exponent);
    println!();

    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> =
        pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Path to the PEM formatted RSA public key. Default location will be used if not specified.",
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// List information about master key
fn show_master_pubkey(path: Option<String>, param: Value) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => find_default_master_pubkey()?
            .ok_or_else(|| format_err!("No path specified and no default master key available."))?,
    };

    let path = path.canonicalize()?;

    let output_format = get_output_format(&param);

    let pem_data = file_get_contents(path.clone())?;
    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;

    let mut info = RsaPubKeyInfo::try_from(rsa)?;
    info.path = Some(path.display().to_string());

    let options = proxmox::api::cli::default_table_format_options()
        .column(ColumnConfig::new("path"))
        .column(ColumnConfig::new("modulus"))
        .column(ColumnConfig::new("exponent"))
        .column(ColumnConfig::new("length"));

    let return_type = ReturnType::new(false, &RsaPubKeyInfo::API_SCHEMA);

    format_and_print_result_full(
        &mut serde_json::to_value(info)?,
        &return_type,
        &output_format,
        &options,
    );

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Key file. Without this the default key will be used.",
                optional: true,
            },
            subject: {
                description: "Include the specified subject as title text.",
                optional: true,
            },
            "output-format": {
                type: PaperkeyFormat,
                optional: true,
            },
        },
    },
)]
/// Generate a printable, human readable text file containing the encryption key.
///
/// This also includes a scannable QR code for fast key restore.
fn paper_key(
    path: Option<String>,
    subject: Option<String>,
    output_format: Option<PaperkeyFormat>,
) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => find_default_encryption_key()?
            .ok_or_else(|| format_err!("no encryption file provided and no default file found"))?,
    };

    let data = file_get_contents(&path)?;
    let data = String::from_utf8(data)?;

    generate_paper_key(std::io::stdout(), &data, subject, output_format)
}

pub fn cli() -> CliCommandMap {
    let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    let key_import_with_master_key_cmd_def = CliCommand::new(&API_METHOD_IMPORT_WITH_MASTER_KEY)
        .arg_param(&["master-keyfile"])
        .completion_cb("master-keyfile", pbs_tools::fs::complete_file_name)
        .arg_param(&["encrypted-keyfile"])
        .completion_cb("encrypted-keyfile", pbs_tools::fs::complete_file_name)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);
    let key_show_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_SHOW_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    let key_show_cmd_def = CliCommand::new(&API_METHOD_SHOW_KEY)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
        .arg_param(&["path"])
        .completion_cb("path", pbs_tools::fs::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("import-with-master-key", key_import_with_master_key_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
        .insert("show", key_show_cmd_def)
        .insert("show-master-pubkey", key_show_master_pubkey_cmd_def)
        .insert("paperkey", paper_key_cmd_def)
}
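create_master_key() writes the private half as a passphrase-protected PKCS#8 PEM, and import_with_master_key() later loads it back via PKey::private_key_from_pem_passphrase. The round trip in isolation, using only openssl crate calls that also appear above (2048 bits here merely to keep the example quick; the real code uses 4096):

use anyhow::Error;

fn main() -> Result<(), Error> {
    // Generate a keypair, as create_master_key() does.
    let rsa = openssl::rsa::Rsa::generate(2048)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    // Encrypt the private key under a passphrase before writing it out.
    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let pem = pkey.private_key_to_pem_pkcs8_passphrase(cipher, b"correct horse")?;

    // Read it back with the passphrase, as import_with_master_key() does.
    let restored = openssl::pkey::PKey::private_key_from_pem_passphrase(&pem, b"correct horse")?;
    assert!(restored.public_eq(&pkey));
    Ok(())
}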
proxmox-backup-client/src/main.rs (new file, 1513 lines)
(file diff suppressed because it is too large)
proxmox-backup-client/src/mount.rs (new file, 366 lines)
@@ -0,0 +1,366 @@
use std::collections::HashMap;
use std::ffi::OsStr;
use std::hash::BuildHasher;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use futures::future::FutureExt;
use futures::select;
use futures::stream::{StreamExt, TryStreamExt};
use nix::unistd::{fork, ForkResult};
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
use proxmox::tools::fd::Fd;

use pbs_datastore::{BackupDir, BackupGroup, CryptConfig, load_and_decrypt_key};
use pbs_datastore::index::IndexFile;
use pbs_datastore::dynamic_index::BufferedDynamicReader;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::json::required_string_param;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    complete_pxar_archive_name,
    complete_img_archive_name,
    complete_group_or_snapshot,
    complete_repository,
    record_repository,
    connect,
    api_datastore_latest_snapshot,
    BufferedDynamicReadAt,
};

#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&mount),
    &ObjectSchema::new(
        "Mount pxar archive.",
        &sorted!([
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("target", false, &StringSchema::new("Target directory path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
            ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
        ]),
    )
);

#[sortable]
const API_METHOD_MAP: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&mount),
    &ObjectSchema::new(
        "Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
WARNING: Only do this with *trusted* backups!",
        &sorted!([
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
            ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
        ]),
    )
);

#[sortable]
const API_METHOD_UNMAP: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&unmap),
    &ObjectSchema::new(
        "Unmap a loop device mapped with 'map' and release all resources.",
        &sorted!([
            ("name", true, &StringSchema::new(
                concat!("Archive name, path to loopdev (/dev/loopX) or loop device number. ",
                    "Omit to list all current mappings and force cleaning up leftover instances.")
            ).schema()),
        ]),
    )
);

pub fn mount_cmd_def() -> CliCommand {

    CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", pbs_tools::fs::complete_file_name)
}

pub fn map_cmd_def() -> CliCommand {

    CliCommand::new(&API_METHOD_MAP)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_img_archive_name)
}

pub fn unmap_cmd_def() -> CliCommand {

    CliCommand::new(&API_METHOD_UNMAP)
        .arg_param(&["name"])
        .completion_cb("name", complete_mapping_names)
}

fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
    -> Vec<String>
{
    match pbs_fuse_loop::find_all_mappings() {
        Ok(mappings) => mappings
            .filter_map(|(name, _)| {
                pbs_systemd::unescape_unit(&name).ok()
            }).collect(),
        Err(_) => Vec::new()
    }
}

fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return pbs_runtime::main(mount_do(param, None));
    }

    // Process should be daemonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let (pr, pw) = pbs_tools::io::pipe()?;
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            drop(pw);
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pr.as_raw_fd(), &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            drop(pr);
            nix::unistd::setsid().unwrap();
            pbs_runtime::main(mount_do(param, Some(pw)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = required_string_param(&param, "archive-name")?;
    let client = connect(&repo)?;

    let target = param["target"].as_str();

    record_repository(&repo);

    let path = required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            println!("Encryption key file: '{:?}'", path);
            let (key, _, fingerprint) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            println!("Encryption key fingerprint: '{}'", fingerprint);
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        if target.is_none() {
            bail!("use the 'mount' command to mount pxar archives");
        }
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        if target.is_some() {
            bail!("use the 'map' command to map drive images");
        }
        format!("{}.fidx", archive_name)
    } else {
        bail!("Can only mount/map pxar archives and drive images.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (manifest, _) = client.download_manifest().await?;
    manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;

    let file_info = manifest.lookup_file_info(&server_archive_name)?;

    let daemonize = || -> Result<(), Error> {
        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe.as_raw_fd(), &[0u8])?;
            let _: Fd = pipe;
        }

        Ok(())
    };

    let options = OsStr::new("ro,default_permissions");

    // handle SIGINT and SIGTERM
    let mut interrupt_int = signal(SignalKind::interrupt())?;
    let mut interrupt_term = signal(SignalKind::terminate())?;

    let mut interrupt = futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: pbs_client::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;

        let session = pbs_client::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target.unwrap()),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        daemonize()?;

        select! {
            res = session.fuse() => res?,
            _ = interrupt => {
                // exit on interrupted
            }
        }
    } else if server_archive_name.ends_with(".fidx") {
        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
        let size = index.index_bytes();
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
        let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();

        let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
        let name_escaped = pbs_systemd::escape_unit(name, false);

        let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
        let loopdev = session.loopdev_path.clone();

        let (st_send, st_recv) = futures::channel::mpsc::channel(1);
        let (mut abort_send, abort_recv) = futures::channel::mpsc::channel(1);
        let mut st_recv = st_recv.fuse();
        let mut session_fut = session.main(st_send, abort_recv).boxed().fuse();

        // poll until loop file is mapped (or errors)
        select! {
            _res = session_fut => {
                bail!("FUSE session unexpectedly ended before loop file mapping");
            },
            res = st_recv.try_next() => {
                if let Err(err) = res {
                    // init went wrong, abort now
                    abort_send.try_send(()).map_err(|err|
                        format_err!("error while sending abort signal - {}", err))?;
                    // ignore and keep original error cause
                    let _ = session_fut.await;
                    return Err(err);
                }
            }
        }

        // daemonize only now to be able to print mapped loopdev or startup errors
        println!("Image '{}' mapped on {}", name, loopdev);
        daemonize()?;

        // continue polling until complete or interrupted (which also happens on unmap)
        select! {
            res = session_fut => res?,
            _ = interrupt => {
                // exit on interrupted
                abort_send.try_send(()).map_err(|err|
                    format_err!("error while sending abort signal - {}", err))?;
                session_fut.await?;
            }
        }

        println!("Image unmapped");
    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}

fn unmap(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let mut name = match param["name"].as_str() {
        Some(name) => name.to_owned(),
        None => {
            pbs_fuse_loop::cleanup_unused_run_files(None);
            let mut any = false;
            for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
                let name = pbs_systemd::unescape_unit(&backing)?;
                println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
                any = true;
            }
            if !any {
                println!("Nothing mapped.");
            }
            return Ok(Value::Null);
        },
    };

    // allow loop device number alone
    if let Ok(num) = name.parse::<u8>() {
        name = format!("/dev/loop{}", num);
    }

    if name.starts_with("/dev/loop") {
        pbs_fuse_loop::unmap_loopdev(name)?;
    } else {
        let name = pbs_systemd::escape_unit(&name, false);
        pbs_fuse_loop::unmap_name(name)?;
    }

    Ok(Value::Null)
}
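mount() daemonizes with a fork-plus-pipe handshake: the parent blocks reading from a pipe until the child (the future FUSE daemon) has finished its setup and written one byte, so startup errors are still reported on the caller's terminal. A stripped-down sketch of just that handshake using raw nix calls; the real code instead uses the pbs_tools::io::pipe() helper, which returns owned Fd guards:

use anyhow::{bail, Error};
use nix::unistd::{fork, ForkResult};

// Illustrative sketch, assuming nix 0.19 as in the manifest above.
fn daemonize_with_handshake(child_setup: impl FnOnce() -> Result<(), Error>) -> Result<(), Error> {
    // Create the pipe before forking so each process inherits one end.
    let (pr, pw) = nix::unistd::pipe()?;
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pw)?;
            // Block until the child reports that its setup succeeded.
            let _ = nix::unistd::read(pr, &mut [0u8])?;
            Ok(()) // the parent may now exit
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pr)?;
            nix::unistd::setsid()?; // detach from the controlling terminal
            child_setup()?;
            // One byte over the pipe releases the waiting parent.
            nix::unistd::write(pw, &[0u8])?;
            Ok(()) // the child continues as the daemon
        }
        Err(_) => bail!("fork failed"),
    }
}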
proxmox-backup-client/src/snapshot.rs (new file, 410 lines)
@@ -0,0 +1,410 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::{
|
||||
api::{api, cli::*},
|
||||
tools::fs::file_get_contents,
|
||||
};
|
||||
|
||||
use pbs_api_types::SnapshotListItem;
|
||||
use pbs_client::tools::key_source::get_encryption_key_password;
|
||||
use pbs_datastore::{BackupGroup, CryptMode, CryptConfig, decrypt_key};
|
||||
use pbs_datastore::data_blob::DataBlob;
|
||||
use pbs_tools::json::required_string_param;
|
||||
|
||||
use crate::{
|
||||
REPO_URL_SCHEMA,
|
||||
KEYFILE_SCHEMA,
|
||||
KEYFD_SCHEMA,
|
||||
BackupDir,
|
||||
api_datastore_list_snapshots,
|
||||
complete_backup_snapshot,
|
||||
complete_backup_group,
|
||||
complete_repository,
|
||||
connect,
|
||||
crypto_parameters,
|
||||
extract_repository_from_value,
|
||||
record_repository,
|
||||
};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
group: {
|
||||
type: String,
|
||||
description: "Backup group.",
|
||||
optional: true,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List backup snapshots.
|
||||
async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let client = connect(&repo)?;
|
||||
|
||||
let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
|
||||
Some(path.parse()?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
|
||||
let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
|
||||
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
|
||||
};
|
||||
|
||||
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
|
||||
let mut filenames = Vec::new();
|
||||
for file in &item.files {
|
||||
filenames.push(file.filename.to_string());
|
||||
}
|
||||
Ok(pbs_tools::format::render_backup_file_list(&filenames[..]))
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.sortby("backup-type", false)
|
||||
.sortby("backup-id", false)
|
||||
.sortby("backup-time", false)
|
||||
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
|
||||
.column(ColumnConfig::new("size").renderer(pbs_tools::format::render_bytes_human_readable))
|
||||
.column(ColumnConfig::new("files").renderer(render_files))
|
||||
;
|
||||
|
||||
let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE;
|
||||
|
||||
format_and_print_result_full(&mut data, return_type, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
snapshot: {
|
||||
type: String,
|
||||
description: "Snapshot path.",
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List snapshot files.
|
||||
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let path = required_string_param(¶m, "snapshot")?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let client = connect(&repo)?;
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
|
||||
|
||||
let mut result = client.get(&path, Some(json!({
|
||||
"backup-type": snapshot.group().backup_type(),
|
||||
"backup-id": snapshot.group().backup_id(),
|
||||
"backup-time": snapshot.backup_time(),
|
||||
}))).await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE;
|
||||
|
||||
let mut data: Value = result["data"].take();
|
||||
|
||||
let options = default_table_format_options();
|
||||
|
||||
format_and_print_result_full(&mut data, return_type, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
        }
    }
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let mut client = connect(&repo)?;
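
    // Forgetting a snapshot is a DELETE against the datastore's snapshot endpoint.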
    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
    }))).await?;

    record_repository(&repo);

    Ok(result)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            logfile: {
                type: String,
                description: "The path to the log file you want to upload.",
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
        }
    }
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {

    let logfile = required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = snapshot.parse()?;

    let mut client = connect(&repo)?;

    let crypto = crypto_parameters(&param)?;
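
    // Set up a crypt config only when an encryption key was supplied.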
    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
            let (key, _created, _) = decrypt_key(&key.key, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let data = file_get_contents(logfile)?;
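
    // Encode the log as a DataBlob: compressed, and encrypted when a crypt
    // config is present.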

    // fixme: howto sign log?
    let blob = match crypto.mode {
        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
    };

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
    });

    let body = hyper::Body::from(raw_data);

    client.upload("application/octet-stream", body, &path, Some(args)).await
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show notes.
async fn show_notes(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let path = required_string_param(&param, "snapshot")?;

    let snapshot: BackupDir = path.parse()?;
    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/notes", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
    });

    let output_format = get_output_format(&param);

    let mut result = client.get(&path, Some(args)).await?;

    let notes = result["data"].take();
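
    // Print the raw notes for text output; otherwise wrap them in a JSON
    // object so structured output formats stay parseable.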
    if output_format == "text" {
        if let Some(notes) = notes.as_str() {
            println!("{}", notes);
        }
    } else {
        format_and_print_result(
            &json!({
                "notes": notes,
            }),
            &output_format,
        );
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            notes: {
                type: String,
                description: "The Notes.",
            },
        }
    }
)]
/// Update notes.
async fn update_notes(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let path = required_string_param(&param, "snapshot")?;
    let notes = required_string_param(&param, "notes")?;

    let snapshot: BackupDir = path.parse()?;
    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/notes", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
        "notes": notes,
    });

    client.put(&path, Some(args)).await?;

    Ok(Value::Null)
}

fn notes_cli() -> CliCommandMap {
    CliCommandMap::new()
        .insert(
            "show",
            CliCommand::new(&API_METHOD_SHOW_NOTES)
                .arg_param(&["snapshot"])
                .completion_cb("snapshot", complete_backup_snapshot),
        )
        .insert(
            "update",
            CliCommand::new(&API_METHOD_UPDATE_NOTES)
                .arg_param(&["snapshot", "notes"])
                .completion_cb("snapshot", complete_backup_snapshot),
        )
}

pub fn snapshot_mgtm_cli() -> CliCommandMap {
    CliCommandMap::new()
        .insert("notes", notes_cli())
        .insert(
            "list",
            CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
                .arg_param(&["group"])
                .completion_cb("group", complete_backup_group)
                .completion_cb("repository", complete_repository)
        )
        .insert(
            "files",
            CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
                .arg_param(&["snapshot"])
                .completion_cb("repository", complete_repository)
                .completion_cb("snapshot", complete_backup_snapshot)
        )
        .insert(
            "forget",
            CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
                .arg_param(&["snapshot"])
                .completion_cb("repository", complete_repository)
                .completion_cb("snapshot", complete_backup_snapshot)
        )
        .insert(
            "upload-log",
            CliCommand::new(&API_METHOD_UPLOAD_LOG)
                .arg_param(&["snapshot", "logfile"])
                .completion_cb("snapshot", complete_backup_snapshot)
                .completion_cb("logfile", pbs_tools::fs::complete_file_name)
                .completion_cb("keyfile", pbs_tools::fs::complete_file_name)
                .completion_cb("repository", complete_repository)
        )
}
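
// Usage sketch, assuming this map is mounted as `snapshot` in the binary's
// top-level CLI (the mount point lives outside this file):
//
//     proxmox-backup-client snapshot list [<group>]
//     proxmox-backup-client snapshot files <snapshot>
//     proxmox-backup-client snapshot forget <snapshot>
//     proxmox-backup-client snapshot upload-log <snapshot> <logfile>
//     proxmox-backup-client snapshot notes show <snapshot>
//     proxmox-backup-client snapshot notes update <snapshot> <notes>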

150
proxmox-backup-client/src/task.rs
Normal file
@ -0,0 +1,150 @@
use anyhow::Error;
use serde_json::{json, Value};

use proxmox::api::{api, cli::*};

use pbs_client::display_task_log;
use pbs_tools::percent_encoding::percent_encode_component;
use pbs_tools::json::required_string_param;

use pbs_api_types::UPID;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    complete_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            limit: {
                description: "The maximal number of tasks to list.",
                type: Integer,
                optional: true,
                minimum: 1,
                maximum: 1000,
                default: 50,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repository user.
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(&repo)?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
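    // Without --all, restrict the query to tasks that are still running.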
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.auth_id(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let return_type = &pbs_api_types::NODE_TASKS_LIST_TASKS_RETURN_TYPE;

    use pbs_tools::format::{render_epoch, render_task_status};
    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(render_task_status));

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                type: UPID,
            },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = required_string_param(&param, "upid")?;

    let mut client = connect(&repo)?;

    display_task_log(&mut client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                type: UPID,
            },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = required_string_param(&param, "upid")?;

    let mut client = connect(&repo)?;
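
    // A UPID contains ':' separators, so percent-encode it before using it as
    // a URL path component; stopping a task is a DELETE on its endpoint.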
    let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}

pub fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
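
// A minimal wiring sketch, assuming the top-level CLI in main.rs (not shown
// here) mounts these maps roughly like:
//
//     let cmd_def = CliCommandMap::new()
//         .insert("task", task_mgmt_cli())
//         .insert("snapshot", snapshot_mgtm_cli());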