//#[macro_use]
extern crate proxmox_backup;

use failure::*;
//use std::os::unix::io::AsRawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::Write;

use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};

use proxmox_backup::tools;
use proxmox_backup::cli::*;
use proxmox_backup::api2::types::*;
use proxmox_backup::api_schema::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
use proxmox_backup::pxar;

//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
//use hyper::Body;
use std::sync::Arc;
use regex::Regex;
use xdg::BaseDirectories;

use lazy_static::lazy_static;
use futures::*;
use tokio::sync::mpsc;

lazy_static! {
    static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$").unwrap();

    static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );
}
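
// For orientation: a backupspec as matched by BACKUPSPEC_REGEX pairs an
// archive name with a source path, e.g. "root.pxar:/" or "disk.img:/dev/sda"
// (illustrative values; the label extension selects the archive type).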

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
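
// Note: the repository comes either from the "repository" parameter or from
// the PBS_REPOSITORY environment variable; the exact URL syntax is defined by
// BackupRepository's parser (typically of the form "[user@]host:datastore").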

fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or(json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
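
// The cache file is a flat JSON object mapping repository URL to use count,
// e.g. {"user@host:store1": 5, "user@host:store2": 1}; only the ten most
// used entries survive the rewrite above.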

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or(json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
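
// Streams a directory as a pxar archive to the server, split into
// dynamically sized chunks ("dynamic" index); the chunker runs in its own
// task so encoding and upload can proceed in parallel.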

fn backup_directory<P: AsRef<Path>>(
    client: &BackupClient,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    device_set: Option<HashSet<u64>>,
    verbose: bool,
    skip_lost_and_found: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found)?;
    let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks

    let stream = rx
        .map_err(Error::from)
        .and_then(|x| x); // flatten

    // spawn chunker inside a separate task so that it can run parallel
    tokio::spawn(
        tx.send_all(chunk_stream.then(|r| Ok(r)))
            .map_err(|_| {}).map(|_| ())
    );

    let stats = client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;

    Ok(stats)
}
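
// Streams a file or block device image to the server in fixed-size chunks
// ("fixed" index); without an explicit chunk-size the stream defaults to
// 4 MiB chunks.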

fn backup_image<P: AsRef<Path>>(
    client: &BackupClient,
    image_path: P,
    archive_name: &str,
    image_size: u64,
    chunk_size: Option<usize>,
    _verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).wait()?;

    let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    let stats = client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;

    Ok(stats)
}

fn strip_server_file_extension(name: &str) -> String {

    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        name[..name.len()-5].to_owned()
    } else {
        name.to_owned() // should not happen
    }
}
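
// Example: the server stores "root.pxar" as "root.pxar.didx", so
// strip_server_file_extension("root.pxar.didx") yields "root.pxar" again.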

fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).wait()?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Utc.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!(
                "{:20} | {} | {:5} | {}",
                path,
                BackupDir::backup_time_to_string(last_backup),
                backup_count,
                tools::join(&files, ' '),
            );
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "last-backup": epoch,
                "backup-count": backup_count,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let mut args = json!({});
    if let Some(path) = param["group"].as_str() {
        let group = BackupGroup::parse(path)?;
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let result = client.get(&path, Some(args)).wait()?;

    record_repository(&repo);

    let list = result["data"].as_array().unwrap();

    let mut result = vec![];

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter()
            .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

        if output_format == "text" {
            println!("{} | {}", path, tools::join(&files, ' '));
        } else {
            result.push(json!({
                "backup-type": btype,
                "backup-id": id,
                "backup-time": epoch,
                "files": files,
            }));
        }
    }

    if output_format != "text" { format_and_print_result(&result.into(), &output_format); }

    Ok(Value::Null)
}

fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let result = client.delete(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).wait()?;

    record_repository(&repo);

    Ok(result)
}

fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/files", repo.store());

    let result = client.get(&path, Some(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))).wait()?;

    record_repository(&repo);

    let list: Vec<String> = result["data"].as_array().unwrap().iter()
        .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();

    if output_format == "text" {
        for file in list {
            println!("{}", file);
        }
    } else {
        format_and_print_result(&list.into(), &output_format);
    }

    Ok(Value::Null)
}

fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).wait()?;

    record_repository(&repo);

    Ok(result)
}

fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }
    bail!("unable to parse directory specification '{}'", value);
}
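
// Example: parse_backupspec("root.pxar:/") returns ("root.pxar", "/"); the
// first capture is the archive name (with type extension), the second the
// local source path.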

fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let backup_id = param["backup-id"].as_str().unwrap_or(&tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];

    enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE }
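
    // Each backupspec is classified by its label extension: ".pxar" expects a
    // directory, ".img" a regular file or block device, ".conf" and ".log" a
    // regular file; anything else is rejected below.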

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        use std::os::unix::fs::FileTypeExt;

        let metadata = match std::fs::metadata(filename) {
            Ok(m) => m,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };
        let file_type = metadata.file_type();

        let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();

        match extension {
            "pxar" => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
            }
            "img" => {

                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
            }
            "conf" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            "log" => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupType::LOGFILE, filename.to_owned(), target.to_owned(), metadata.len()));
            }
            _ => {
                bail!("got unknown archive extension '{}'", extension);
            }
        }
    }

    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0);

    let client = HttpClient::new(repo.host(), repo.user())?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));

    println!("Client name: {}", tools::nodename());

    let start_time = Local::now();

    println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    let (crypt_config, rsa_encrypted_key) = match keyfile {
        None => (None, None),
        Some(path) => {
            let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

            let crypt_config = CryptConfig::new(key)?;

            let path = master_pubkey_path()?;
            if path.exists() {
                let pem_data = file_get_contents(&path)?;
                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
                (Some(Arc::new(crypt_config)), Some(enc_key))
            } else {
                (Some(Arc::new(crypt_config)), None)
            }
        }
    };

    let client = client.start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose).wait()?;

    let mut file_list = vec![];

    for (backup_type, filename, target, size) in upload_list {
        match backup_type {
            BackupType::CONFIG => {
                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
                file_list.push((target, stats));
            }
            BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
                file_list.push((target, stats));
            }
            BackupType::PXAR => {
                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    devices.clone(),
                    verbose,
                    skip_lost_and_found,
                    crypt_config.clone(),
                )?;
                file_list.push((target, stats));
            }
            BackupType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    size,
                    chunk_size_opt,
                    verbose,
                    crypt_config.clone(),
                )?;
                file_list.push((target, stats));
            }
        }
    }

    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = "rsa-encrypted.key";
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let stats = client.upload_blob_from_data(rsa_encrypted_key, target, None, false, false).wait()?;
        file_list.push((target.to_owned(), stats));

        // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
        /*
        let mut buffer2 = vec![0u8; rsa.size() as usize];
        let pem_data = file_get_contents("master-private.pem")?;
        let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
        let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
        println!("TEST {} {:?}", len, buffer2);
        */
    }

    // create index.json
    let file_list = file_list.iter()
        .fold(json!({}), |mut acc, (filename, stats)| {
            acc[filename] = json!({
                "size": stats.size,
            });
            acc
        });

    let index = json!({
        "backup-type": backup_type,
        "backup-id": backup_id,
        "backup-time": backup_time.timestamp(),
        "files": file_list,
    });

    println!("Upload index.json to '{:?}'", repo);
    let index_data = serde_json::to_string_pretty(&index)?.into();
    client.upload_blob_from_data(index_data, "index.json", crypt_config.clone(), true, true).wait()?;

    client.finish().wait()?;

    let end_time = Local::now();
    let elapsed = end_time.signed_duration_since(start_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

    Ok(Value::Null)
}

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}

fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = HttpClient::new(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group = BackupGroup::parse(path)?;

        let list_path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
        let result = client.get(&list_path, Some(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))).wait()?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        let epoch = list[0]["backup-time"].as_i64().unwrap();
        let backup_time = Utc.timestamp(epoch, 0);
        (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
    } else {
        let snapshot = BackupDir::parse(path)?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };
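
    // Map the client-side archive name to the name stored on the server:
    // ".pxar" archives live in a dynamic index (".didx"), ".img" archives in
    // a fixed index (".fidx"), and everything else is a plain ".blob".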

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else if archive_name.ends_with(".img") {
        format!("{}.fidx", archive_name)
    } else {
        format!("{}.blob", archive_name)
    };

    let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;

    use std::os::unix::fs::OpenOptionsExt;

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    if server_archive_name.ends_with(".blob") {

        let writer = Vec::with_capacity(1024*1024);
        let blob_data = client.download(&server_archive_name, writer).wait()?;
        let blob = DataBlob::from_raw(blob_data)?;
        blob.verify_crc()?;

        let raw_data = match crypt_config {
            Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
            None => blob.decode(None)?,
        };

        if let Some(target) = target {
            file_set_contents(target, &raw_data, None)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&raw_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if server_archive_name.ends_with(".didx") {
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        if let Some(target) = target {

            let feature_flags = pxar::flags::DEFAULT;
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
                if verbose {
                    println!("{:?}", path);
                }
                Ok(())
            });
            decoder.set_allow_existing_dirs(allow_existing_dirs);

            decoder.restore(Path::new(target), &Vec::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if server_archive_name.ends_with(".fidx") {
        let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

        let mut reader = BufferedFixedReader::new(index, chunk_reader);

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to store data - {}", err))?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else {
        bail!("unknown archive file extension (expected .pxar or .img)");
    }

    Ok(Value::Null)
}

fn upload_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let logfile = tools::required_string_param(&param, "logfile")?;
    let repo = extract_repository_from_value(&param)?;

    let snapshot = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(snapshot)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(crypt_config)
        }
    };

    let data = file_get_contents(logfile)?;

    let blob = if let Some(ref crypt_config) = crypt_config {
        DataBlob::encode(&data, Some(crypt_config), true)?
    } else {
        DataBlob::encode(&data, None, true)?
    };

    let raw_data = blob.into_inner();

    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());

    let args = json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    });

    let body = hyper::Body::from(raw_data);

    let result = client.upload("application/octet-stream", body, &path, Some(args)).wait()?;

    Ok(result)
}

fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let _result = client.post(&path, Some(param)).wait()?;

    record_repository(&repo);

    Ok(Value::Null)
}

fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();

    let client = HttpClient::new(repo.host(), repo.user())?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let result = client.get(&path, None).wait()?;
    let data = &result["data"];

    record_repository(&repo);

    if output_format == "text" {
        let total = data["total"].as_u64().unwrap();
        let used = data["used"].as_u64().unwrap();
        let avail = data["avail"].as_u64().unwrap();
        let roundup = total/200; // 0.5% of total, so the integer percentage below rounds to nearest

        println!(
            "total: {} used: {} ({} %) available: {}",
            total,
            used,
            ((used+roundup)*100)/total,
            avail,
        );
    } else {
        format_and_print_result(data, &output_format);
    }

    Ok(Value::Null)
}

// like get, but simply ignore errors and return Null instead
fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let client = match HttpClient::new(repo.host(), repo.user()) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).wait() {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group(arg, param);
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    let mut parts = arg.split('/');
    let query = tools::json_object_to_query(json!({
        "backup-type": parts.next().unwrap(),
        "backup-id": parts.next().unwrap(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item.as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    complete_server_file_name(arg, param)
        .iter().map(|v| strip_server_file_extension(&v)).collect()
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}

fn get_encryption_key_password() -> Result<Vec<u8>, Error> {

    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if crate::tools::tty::stdin_isatty() {
        return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}
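
// Non-interactive runs can therefore supply the key passphrase through the
// environment, e.g. (illustrative invocation; the binary name may differ):
//   PBS_ENCRYPTION_PASSWORD=secret proxmox-backup-client backup root.pxar:/ --keyfile my.key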

fn key_create(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    let key = proxmox::sys::linux::random_data(32)?;

    if kdf == "scrypt" {
        // always read passphrase from tty
        if !crate::tools::tty::stdin_isatty() {
            bail!("unable to read passphrase - no tty");
        }

        let password = crate::tools::tty::read_password("Encryption Key Password: ")?;

        let key_config = encrypt_key_with_passphrase(&key, &password)?;

        store_key_config(&path, false, key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let created = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, false, KeyConfig {
            kdf: None,
            created,
            modified: created,
            data: key,
        })?;

        Ok(Value::Null)
    } else {
        unreachable!(); // the kdf schema only allows "scrypt" or "none"
    }
}

fn master_pubkey_path() -> Result<PathBuf, Error> {
    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually $HOME/.config/proxmox-backup/master-public.pem
    let path = base.place_config_file("master-public.pem")?;

    Ok(path)
}

fn key_import_master_pubkey(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = master_pubkey_path()?;

    file_set_contents(&target_path, &pem_data, None)?;

    println!("Imported public master key to {:?}", target_path);

    Ok(Value::Null)
}

fn key_create_master_key(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
    let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

    if new_pw != verify_pw {
        bail!("Password verification failed!");
    }

    if new_pw.len() < 5 {
        bail!("Password is too short!");
    }

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    file_set_contents(filename_pub, pub_key.as_slice(), None)?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    file_set_contents(filename_priv, priv_key.as_slice(), None)?;

    Ok(Value::Null)
}

fn key_change_passphrase(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let path = tools::required_string_param(&param, "path")?;
    let path = PathBuf::from(path);

    let kdf = param["kdf"].as_str().unwrap_or("scrypt");

    // we need a TTY to query the new password
    if !crate::tools::tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;

    if kdf == "scrypt" {

        let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
        let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;

        if new_pw != verify_pw {
            bail!("Password verification failed!");
        }

        if new_pw.len() < 5 {
            bail!("Password is too short!");
        }

        let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
        new_key_config.created = created; // keep original value

        store_key_config(&path, true, new_key_config)?;

        Ok(Value::Null)
    } else if kdf == "none" {
        let modified = Local.timestamp(Local::now().timestamp(), 0);

        store_key_config(&path, true, KeyConfig {
            kdf: None,
            created, // keep original value
            modified,
            data: key.to_vec(),
        })?;

        Ok(Value::Null)
    } else {
        unreachable!(); // the kdf schema only allows "scrypt" or "none"
    }
}

fn key_mgmt_cli() -> CliCommandMap {

    let kdf_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Key derivation function. Choose 'none' to store the key unencrypted.")
            .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
            .default("scrypt")
            .into()
    );

    let key_create_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create,
            ObjectSchema::new("Create a new encryption key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_change_passphrase,
            ObjectSchema::new("Change the passphrase required to decrypt the key.")
                .required("path", StringSchema::new("File system path."))
                .optional("kdf", kdf_schema.clone())
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_create_master_key,
            ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
        ));

    let key_import_master_pubkey_cmd_def = CliCommand::new(
        ApiMethod::new(
            key_import_master_pubkey,
            ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
                .required("path", StringSchema::new("File system path."))
        ))
        .arg_param(vec!["path"])
        .completion_cb("path", tools::complete_file_name);

    let cmd_def = CliCommandMap::new()
        .insert("create".to_owned(), key_create_cmd_def.into())
        .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
        .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
        .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());

    cmd_def
}
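
// Resulting `key` subcommand tree (paths and flags exactly as wired up above):
//
//   key create <path> [--kdf scrypt|none]
//   key create-master-key
//   key import-master-pubkey <path>
//   key change-passphrase <path> [--kdf scrypt|none]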

fn main() {

    let backup_source_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Backup source specification ([<label>:<path>]).")
            .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
            .into()
    );

    let backup_cmd_def = CliCommand::new(
        ApiMethod::new(
            create_backup,
            ObjectSchema::new("Create (host) backup.")
                .required(
                    "backupspec",
                    ArraySchema::new(
                        "List of backup source specifications ([<label.ext>:<path>] ...)",
                        backup_source_schema,
                    ).min_length(1)
                )
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "include-dev",
                    ArraySchema::new(
                        "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                        StringSchema::new("Path to file.").into()
                    )
                )
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false))
                .optional(
                    "skip-lost-and-found",
                    BooleanSchema::new("Skip lost+found directory.").default(false))
                .optional(
                    "backup-type",
                    BACKUP_TYPE_SCHEMA.clone()
                )
                .optional(
                    "backup-id",
                    BACKUP_ID_SCHEMA.clone()
                )
                .optional(
                    "backup-time",
                    BACKUP_TIME_SCHEMA.clone()
                )
                .optional(
                    "chunk-size",
                    IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                        .minimum(64)
                        .maximum(4096)
                        .default(4096)
                )
        ))
        .arg_param(vec!["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let upload_log_cmd_def = CliCommand::new(
        ApiMethod::new(
            upload_log,
            ObjectSchema::new("Upload backup log file.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .required("logfile", StringSchema::new("The path to the log file you want to upload."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional(
                    "keyfile",
                    StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
        ))
        .arg_param(vec!["snapshot", "logfile"])
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("logfile", tools::complete_file_name)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("repository", complete_repository);

    let list_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_backup_groups,
            ObjectSchema::new("List backup groups.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshots,
            ObjectSchema::new("List backup snapshots.")
                .optional("group", StringSchema::new("Backup group."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(
        ApiMethod::new(
            forget_snapshots,
            ObjectSchema::new("Forget (remove) backup snapshots.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(
        ApiMethod::new(
            start_garbage_collection,
            ObjectSchema::new("Start garbage collection for a specific repository.")
                .optional("repository", REPO_URL_SCHEMA.clone())
        ))
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(
        ApiMethod::new(
            restore,
            ObjectSchema::new("Restore backup repository.")
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
                ))
                .optional(
                    "allow-existing-dirs",
                    BooleanSchema::new("Do not fail if directories already exist.").default(false))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("keyfile", StringSchema::new("Path to encryption key."))
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false)
                )
        ))
        .arg_param(vec!["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let files_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshot_files,
            ObjectSchema::new("List snapshot files.")
                .required("snapshot", StringSchema::new("Snapshot path."))
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .arg_param(vec!["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let prune_cmd_def = CliCommand::new(
        ApiMethod::new(
            prune,
            proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
                ObjectSchema::new("Prune backup repository.")
                    .required("group", StringSchema::new("Backup group."))
                    .optional("repository", REPO_URL_SCHEMA.clone())
            )
        ))
        .arg_param(vec!["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(
        ApiMethod::new(
            status,
            ObjectSchema::new("Get repository status.")
                .optional("repository", REPO_URL_SCHEMA.clone())
                .optional("output-format", OUTPUT_FORMAT.clone())
        ))
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("upload-log".to_owned(), upload_log_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into())
        .insert("files".to_owned(), files_cmd_def.into())
        .insert("status".to_owned(), status_cmd_def.into())
        .insert("key".to_owned(), key_mgmt_cli().into());

    // run the selected command inside a hyper/tokio runtime, since most
    // commands perform async HTTP requests against the backup server
    hyper::rt::run(futures::future::lazy(move || {
        run_cli_command(cmd_def.into());
        Ok(())
    }));
}