use std::collections::{HashSet, HashMap};
use std::convert::TryFrom;
use std::io::{self, Read, Write, Seek, SeekFrom};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;

use anyhow::{bail, format_err, Error};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use xdg::BaseDirectories;

use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox::{
    tools::{
        time::{strftime_local, epoch_i64},
        fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
    },
    api::{
        api,
        ApiHandler,
        ApiMethod,
        RpcEnvironment,
        schema::*,
        cli::*,
    },
};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use proxmox_backup::tools;
use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
    archive_type,
    decrypt_key,
    rsa_encrypt_key_config,
    verify_chunk_size,
    ArchiveType,
    AsyncReadChunk,
    BackupDir,
    BackupGroup,
    BackupManifest,
    BufferedDynamicReader,
    CATALOG_NAME,
    CatalogReader,
    CatalogWriter,
    ChunkStream,
    CryptConfig,
    CryptMode,
    DynamicIndexReader,
    ENCRYPTED_KEY_BLOB_NAME,
    FixedChunkStream,
    FixedIndexReader,
    KeyConfig,
    IndexFile,
    MANIFEST_BLOB_NAME,
    Shell,
};

mod proxmox_backup_client;
use proxmox_backup_client::*;

const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
    .max_length(256)
    .schema();

pub const KEYFILE_SCHEMA: Schema =
    StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
        .schema();

pub const KEYFD_SCHEMA: Schema =
    IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
        .minimum(0)
        .schema();

pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
    "Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
    .schema();

pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
    IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
        .minimum(0)
        .schema();

const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
    "Chunk size in KB. Must be a power of 2.")
    .minimum(64)
    .maximum(4096)
    .default(4096)
    .schema();

fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}
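
/// Extract the repository from the "repository" parameter, falling back to
/// the PBS_REPOSITORY environment variable.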
pub fn extract_repository_from_value(
    param: &Value,
) -> Result<BackupRepository, Error> {

    let repo_url = param["repository"]
        .as_str()
        .map(String::from)
        .or_else(get_default_repository)
        .ok_or_else(|| format_err!("unable to get (default) repository"))?;

    let repo: BackupRepository = repo_url.parse()?;

    Ok(repo)
}

fn extract_repository_from_map(
    param: &HashMap<String, String>,
) -> Option<BackupRepository> {

    param.get("repository")
        .map(String::from)
        .or_else(get_default_repository)
        .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
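
/// Bump the usage counter for this repository in the cached repo-list and
/// prune the list to the ten most-used entries (used for CLI completion).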
fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() > 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}

pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}
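
/// Build an authenticated HTTP client for a repository, honoring the
/// PBS_PASSWORD and PBS_FINGERPRINT environment variables when set.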
fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
    connect_do(repo.host(), repo.port(), repo.auth_id())
        .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}

fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

    use std::env::VarError::*;
    let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
        Ok(p) => Some(p),
        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
        Err(NotPresent) => None,
    };

    let options = HttpClientOptions::new_interactive(password, fingerprint);

    HttpClient::new(server, port, auth_id, options)
}
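
/// Query the snapshot list of a datastore, optionally filtered to a single
/// backup group.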
async fn api_datastore_list_snapshots(
    client: &HttpClient,
    store: &str,
    group: Option<BackupGroup>,
) -> Result<Value, Error> {

    let path = format!("api2/json/admin/datastore/{}/snapshots", store);

    let mut args = json!({});
    if let Some(group) = group {
        args["backup-type"] = group.backup_type().into();
        args["backup-id"] = group.backup_id().into();
    }

    let mut result = client.get(&path, Some(args)).await?;

    Ok(result["data"].take())
}
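
/// Return the (backup-type, backup-id, backup-time) triple of the most recent
/// snapshot in the given backup group.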
pub async fn api_datastore_latest_snapshot(
    client: &HttpClient,
    store: &str,
    group: BackupGroup,
) -> Result<(String, String, i64), Error> {

    let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
    let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

    if list.is_empty() {
        bail!("backup group {:?} does not contain any snapshots.", group.group_path());
    }

    list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

    let backup_time = list[0].backup_time;

    Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
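
/// Stream a directory to the server as a pxar archive split into dynamically
/// sized chunks, recording its file tree in the shared catalog.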
async fn backup_directory<P: AsRef<Path>>(
    client: &BackupWriter,
    dir_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let pxar_stream = PxarBackupStream::open(
        dir_path.as_ref(),
        catalog,
        pxar_create_options,
    )?;
    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);

    let (tx, rx) = mpsc::channel(10); // buffer up to 10 chunks

    let stream = ReceiverStream::new(rx)
        .map_err(Error::from);

    // spawn chunker inside a separate task so that it can run in parallel
    tokio::spawn(async move {
        while let Some(v) = chunk_stream.next().await {
            let _ = tx.send(v).await;
        }
    });

    if upload_options.fixed_size.is_some() {
        bail!("cannot backup directory with fixed chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}
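
/// Stream a file or block device to the server as a fixed-index archive made
/// of fixed-size chunks (4 MiB unless a chunk size is given).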
async fn backup_image<P: AsRef<Path>>(
    client: &BackupWriter,
    image_path: P,
    archive_name: &str,
    chunk_size: Option<usize>,
    upload_options: UploadOptions,
) -> Result<BackupStats, Error> {

    let path = image_path.as_ref().to_owned();

    let file = tokio::fs::File::open(path).await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_err(Error::from);

    let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

    if upload_options.fixed_size.is_none() {
        bail!("cannot backup image with dynamic chunk size!");
    }

    let stats = client
        .upload_stream(archive_name, stream, upload_options)
        .await?;

    Ok(stats)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path, None).await?;

    record_repository(&repo);

    let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let group = BackupGroup::new(item.backup_type, item.backup_id);
        Ok(group.group_path().to_str().unwrap().to_owned())
    };

    let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
        Ok(tools::format::render_backup_file_list(&item.files))
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
        .column(
            ColumnConfig::new("last-backup")
                .renderer(render_last_backup)
                .header("last snapshot")
                .right_align(false)
        )
        .column(ColumnConfig::new("backup-count"))
        .column(ColumnConfig::new("files").renderer(render_files));

    let mut data: Value = result["data"].take();

    let return_type = &proxmox_backup::api2::admin::datastore::API_METHOD_LIST_GROUPS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            group: {
                type: String,
                description: "Backup group.",
            },
            "new-owner": {
                type: Authid,
            },
        }
    }
)]
/// Change owner of a backup group.
async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    param.as_object_mut().unwrap().remove("repository");

    let group: BackupGroup = group.parse()?;

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
    client.post(&path, Some(param)).await?;

    record_repository(&repo);

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Try to log in. If successful, store the ticket.
async fn api_login(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let client = connect(&repo)?;
    client.login().await?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Logout (delete stored ticket).
fn api_logout(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Show client and optional server version.
async fn api_version(param: Value) -> Result<(), Error> {

    let output_format = get_output_format(&param);

    let mut version_info = json!({
        "client": {
            "version": version::PROXMOX_PKG_VERSION,
            "release": version::PROXMOX_PKG_RELEASE,
            "repoid": version::PROXMOX_PKG_REPOID,
        }
    });

    let repo = extract_repository_from_value(&param);
    if let Ok(repo) = repo {
        let client = connect(&repo)?;

        match client.get("api2/json/version", None).await {
            Ok(mut result) => version_info["server"] = result["data"].take(),
            Err(e) => eprintln!("could not connect to server - {}", e),
        }
    }
    if output_format == "text" {
        println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
        if let Some(server) = version_info["server"].as_object() {
            let server_version = server["version"].as_str().unwrap();
            let server_release = server["release"].as_str().unwrap();
            println!("server version: {}.{}", server_version, server_release);
        }
    } else {
        format_and_print_result(&version_info, &output_format);
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path, None).await?;

    record_repository(&repo);

    view_task_result(&mut client, result, &output_format).await?;

    Ok(Value::Null)
}
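
/// Handle for a catalog upload running in the background: `catalog_writer`
/// feeds entries into the upload stream, `result` resolves once it finishes.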
struct CatalogUploadResult {
    catalog_writer: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
    result: tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>,
}

fn spawn_catalog_upload(
    client: Arc<BackupWriter>,
    encrypt: bool,
) -> Result<CatalogUploadResult, Error> {
    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // buffer up to 10 writes
    let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
    let catalog_chunk_size = 512*1024;
    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));

    let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));

    let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();

    let upload_options = UploadOptions {
        encrypt,
        compress: true,
        ..UploadOptions::default()
    };

    tokio::spawn(async move {
        let catalog_upload_result = client
            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
            .await;

        if let Err(ref err) = catalog_upload_result {
            eprintln!("catalog upload error - {}", err);
            client.cancel();
        }

        let _ = catalog_result_tx.send(catalog_upload_result);
    });

    Ok(CatalogUploadResult { catalog_writer, result: catalog_result_rx })
}
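
/// Fully resolved crypto settings for a backup: the crypt mode, the optional
/// encryption key, and the optional RSA master public key protecting it.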
#[derive(Debug, Eq, PartialEq)]
struct CryptoParams {
    mode: CryptMode,
    enc_key: Option<Vec<u8>>,
    // FIXME switch to openssl::rsa::Rsa<openssl::pkey::Public> once that is Eq?
    master_pubkey: Option<Vec<u8>>,
}
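
/// Resolve the crypto settings from CLI parameters. --keyfile/--keyfd and
/// --master-pubkey-file/--master-pubkey-fd are mutually exclusive pairs,
/// explicit parameters take precedence over default key files, and
/// --crypt-mode=none conflicts with any key or master key parameter.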
fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
    let keyfile = match param.get("keyfile") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --keyfile parameter type"),
        None => None,
    };

    let key_fd = match param.get("keyfd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --keyfd parameter type"),
        None => None,
    };

    let master_pubkey_file = match param.get("master-pubkey-file") {
        Some(Value::String(keyfile)) => Some(keyfile),
        Some(_) => bail!("bad --master-pubkey-file parameter type"),
        None => None,
    };

    let master_pubkey_fd = match param.get("master-pubkey-fd") {
        Some(Value::Number(key_fd)) => Some(
            RawFd::try_from(key_fd
                .as_i64()
                .ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
            )
            .map_err(|err| format_err!("bad master public key fd: {:?}: {}", key_fd, err))?
        ),
        Some(_) => bail!("bad --master-pubkey-fd parameter type"),
        None => None,
    };

    let mode: Option<CryptMode> = match param.get("crypt-mode") {
        Some(mode) => Some(serde_json::from_value(mode.clone())?),
        None => None,
    };

    let keydata = match (keyfile, key_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
        (Some(keyfile), None) => {
            eprintln!("Using encryption key file: {}", keyfile);
            Some(file_get_contents(keyfile)?)
        },
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading encryption key from fd {}: {}", fd, err)
                })?;
            eprintln!("Using encryption key from file descriptor");
            Some(data)
        }
    };

    let master_pubkey_data = match (master_pubkey_file, master_pubkey_fd) {
        (None, None) => None,
        (Some(_), Some(_)) => bail!("--master-pubkey-file and --master-pubkey-fd are mutually exclusive"),
        (Some(keyfile), None) => {
            eprintln!("Using master key from file: {}", keyfile);
            Some(file_get_contents(keyfile)?)
        },
        (None, Some(fd)) => {
            let input = unsafe { std::fs::File::from_raw_fd(fd) };
            let mut data = Vec::new();
            let _len: usize = { input }.read_to_end(&mut data)
                .map_err(|err| {
                    format_err!("error reading master key from fd {}: {}", fd, err)
                })?;
            eprintln!("Using master key from file descriptor");
            Some(data)
        }
    };

    Ok(match (keydata, master_pubkey_data, mode) {
        // no parameters:
        (None, None, None) => match key::read_optional_default_encryption_key()? {
            None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
            enc_key => {
                eprintln!("Encrypting with default encryption key!");
                let master_pubkey = key::read_optional_default_master_pubkey()?;
                CryptoParams {
                    mode: CryptMode::Encrypt,
                    enc_key,
                    master_pubkey,
                }
            },
        },

        // just --crypt-mode=none
        (None, None, Some(CryptMode::None)) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },

        // --keyfile and --crypt-mode=none
        (Some(_), _, Some(CryptMode::None)) => {
            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
        },

        // --master-pubkey-file and --crypt-mode=none
        (_, Some(_), Some(CryptMode::None)) => {
            bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive");
        },

        // --master-pubkey-file and nothing else
        (None, master_pubkey, None) => {
            match key::read_optional_default_encryption_key()? {
                None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
                enc_key => {
                    eprintln!("Encrypting with default encryption key!");
                    CryptoParams {
                        mode: CryptMode::Encrypt,
                        enc_key,
                        master_pubkey,
                    }
                },
            }
        },

        // --crypt-mode other than none, without keyfile, with or without master key
        (None, master_pubkey, Some(mode)) => match key::read_optional_default_encryption_key()? {
            None => bail!("--crypt-mode without --keyfile and no default key file available"),
            enc_key => {
                eprintln!("Encrypting with default encryption key!");
                let master_pubkey = match master_pubkey {
                    None => key::read_optional_default_master_pubkey()?,
                    master_pubkey => master_pubkey,
                };

                CryptoParams {
                    mode,
                    enc_key,
                    master_pubkey,
                }
            },
        }

        // just --keyfile
        (enc_key, master_pubkey, None) => {
            let master_pubkey = match master_pubkey {
                None => key::read_optional_default_master_pubkey()?,
                master_pubkey => master_pubkey,
            };

            CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey }
        },

        // --keyfile and --crypt-mode other than none
        (enc_key, master_pubkey, Some(mode)) => {
            let master_pubkey = match master_pubkey {
                None => key::read_optional_default_master_pubkey()?,
                master_pubkey => master_pubkey,
            };

            CryptoParams { mode, enc_key, master_pubkey }
        },
    })
}

#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
    let some_key = Some(vec![1;1]);
    let default_key = Some(vec![2;1]);

    let some_master_key = Some(vec![3;1]);
    let default_master_key = Some(vec![4;1]);

    let no_key_res = CryptoParams {
        enc_key: None,
        master_pubkey: None,
        mode: CryptMode::None,
    };
    let some_key_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let some_key_some_master_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: some_master_key.clone(),
        mode: CryptMode::Encrypt,
    };
    let some_key_default_master_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: default_master_key.clone(),
        mode: CryptMode::Encrypt,
    };

    let some_key_sign_res = CryptoParams {
        enc_key: some_key.clone(),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };
    let default_key_res = CryptoParams {
        enc_key: default_key.clone(),
        master_pubkey: None,
        mode: CryptMode::Encrypt,
    };
    let default_key_sign_res = CryptoParams {
        enc_key: default_key.clone(),
        master_pubkey: None,
        mode: CryptMode::SignOnly,
    };

    let keypath = "./tests/keyfile.test";
    replace_file(&keypath, some_key.as_ref().unwrap(), CreateOptions::default())?;
    let master_keypath = "./tests/masterkeyfile.test";
    replace_file(&master_keypath, some_master_key.as_ref().unwrap(), CreateOptions::default())?;
    let invalid_keypath = "./tests/invalid_keyfile.test";

    // no params, no default key == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, no default key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now set a default key
    unsafe { key::set_test_encryption_key(Ok(default_key.clone())); }

    // and repeat

    // no params but default key == default key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), default_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
    assert_eq!(res.unwrap(), default_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
    assert_eq!(res.unwrap(), default_key_res);

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now make default key retrieval error
    unsafe { key::set_test_encryption_key(Err(format_err!("test error"))); }

    // and repeat

    // no params, default key retrieval errors == Error
    assert!(crypto_parameters(&json!({})).is_err());

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // crypt mode none == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt/sign-only, no keyfile, default key error == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_sign_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_res);

    // invalid keyfile parameter always errors
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    // now remove default key again
    unsafe { key::set_test_encryption_key(Ok(None)); }
    // set a default master key
    unsafe { key::set_test_default_master_pubkey(Ok(default_master_key.clone())); }

    // an explicit master key without an encryption key == Error
    assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
    // just a default == no key
    let res = crypto_parameters(&json!({}));
    assert_eq!(res.unwrap(), no_key_res);

    // keyfile param == key from keyfile
    let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    // same with fallback to default master key
    let res = crypto_parameters(&json!({"keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // crypt mode none == error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
    // with just default master key == no key
    let res = crypto_parameters(&json!({"crypt-mode": "none"}));
    assert_eq!(res.unwrap(), no_key_res);

    // crypt mode encrypt without enc key == error
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());

    // crypt mode none with explicit key == Error
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
    assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());

    // crypt mode encrypt with keyfile == key from keyfile with correct mode
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
    assert_eq!(res.unwrap(), some_key_some_master_res);
    let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
    assert_eq!(res.unwrap(), some_key_default_master_res);

    // invalid master keyfile parameter always errors when a key is passed, even with a valid
    // default master key
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "none"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
    assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath, "crypt-mode": "encrypt"})).is_err());

    Ok(())
}

#[api(
    input: {
        properties: {
            backupspec: {
                type: Array,
                description: "List of backup source specifications ([<label.ext>:<path>] ...)",
                items: {
                    schema: BACKUP_SOURCE_SCHEMA,
                }
            },
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "include-dev": {
                description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
                optional: true,
                items: {
                    type: String,
                    description: "Path to file.",
                }
            },
            "all-file-systems": {
                type: Boolean,
                description: "Include all mounted subdirectories.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "master-pubkey-file": {
                schema: MASTER_PUBKEY_FILE_SCHEMA,
                optional: true,
            },
            "master-pubkey-fd": {
                schema: MASTER_PUBKEY_FD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
            "skip-lost-and-found": {
                type: Boolean,
                description: "Skip lost+found directory.",
                optional: true,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
            "chunk-size": {
                schema: CHUNK_SIZE_SCHEMA,
                optional: true,
            },
            "exclude": {
                type: Array,
                description: "List of paths or patterns for matching files to exclude.",
                optional: true,
                items: {
                    type: String,
                    description: "Path or match pattern.",
                }
            },
            "entries-max": {
                type: Integer,
                description: "Max number of entries to hold in memory.",
                optional: true,
                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
            },
            "verbose": {
                type: Boolean,
                description: "Verbose output.",
                optional: true,
            },
        }
    }
)]
/// Create (host) backup.
async fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let backup_time_opt = param["backup-time"].as_i64();

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let crypto = crypto_parameters(&param)?;

    let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

    let backup_type = param["backup-type"].as_str().unwrap_or("host");

    let include_dev = param["include-dev"].as_array();

    let entries_max = param["entries-max"].as_u64()
        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

    let empty = Vec::new();
    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

    let mut pattern_list = Vec::with_capacity(exclude_args.len());
    for entry in exclude_args {
        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
        );
    }

    let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };

    if let Some(include_dev) = include_dev {
        if all_file_systems {
            bail!("option 'all-file-systems' conflicts with option 'include-dev'");
        }

        let mut set = HashSet::new();
        for path in include_dev {
            let path = path.as_str().unwrap();
            let stat = nix::sys::stat::stat(path)
                .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
            set.insert(stat.st_dev);
        }
        devices = Some(set);
    }

    let mut upload_list = vec![];
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;

        if target_set.contains(target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;

                if size == 0 { bail!("got zero-sized file '{}'", filename); }

                upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
            }
            BackupSpecificationType::CONFIG => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
            BackupSpecificationType::LOGFILE => {
                if !file_type.is_file() {
                    bail!("got unexpected file type (expected regular file)");
                }
                upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
            }
        }
    }

    let backup_time = backup_time_opt.unwrap_or_else(epoch_i64);

    let client = connect(&repo)?;
    record_repository(&repo);

    println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);

    println!("Client name: {}", proxmox::tools::nodename());

    let start_time = std::time::Instant::now();

    println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
    let (crypt_config, rsa_encrypted_key) = match crypto.enc_key {
        None => (None, None),
        Some(key) => {
            let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
            println!("Encryption key fingerprint: {}", fingerprint);

            let crypt_config = CryptConfig::new(key)?;

            match crypto.master_pubkey {
                Some(pem_data) => {
                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;

                    let mut key_config = KeyConfig::without_password(key)?;
                    key_config.created = created; // keep original value

                    let enc_key = rsa_encrypt_key_config(rsa, &key_config)?;

                    (Some(Arc::new(crypt_config)), Some(enc_key))
                }
                _ => (Some(Arc::new(crypt_config)), None),
            }
        }
    };

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        backup_type,
        &backup_id,
        backup_time,
        verbose,
        false
    ).await?;
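
    // Ask the server for the previous snapshot in this group, so its manifest
    // can be downloaded and re-used for this backup where possible.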
    let download_previous_manifest = match client.previous_backup_time().await {
        Ok(Some(backup_time)) => {
            println!(
                "Downloading previous manifest ({})",
                strftime_local("%c", backup_time)?
            );
            true
        }
        Ok(None) => {
            println!("No previous manifest available.");
            false
        }
        Err(_) => {
            // Fallback for outdated server, TODO remove/bubble up with 2.0
            true
        }
    };

    let previous_manifest = if download_previous_manifest {
        match client.download_previous_manifest().await {
            Ok(previous_manifest) => {
                match previous_manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref)) {
                    Ok(()) => Some(Arc::new(previous_manifest)),
                    Err(err) => {
                        println!("Couldn't re-use previous manifest - {}", err);
                        None
                    }
                }
            }
            Err(err) => {
                println!("Couldn't download previous manifest - {}", err);
                None
            }
        }
    } else {
        None
    };
    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let mut manifest = BackupManifest::new(snapshot);

    let mut catalog = None;
    let mut catalog_result_rx = None;

    for (backup_type, filename, target, size) in upload_list {
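        // Dispatch each source by type: config/log files are uploaded as plain
        // blobs, directories as content-chunked pxar archives (dynamic index),
        // block device images as fixed-size chunk indexes.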
        match backup_type {
            BackupSpecificationType::CONFIG => {
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
            BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                let upload_options = UploadOptions {
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                let stats = client
                    .upload_blob_from_file(&filename, &target, upload_options)
                    .await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
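
            // Directory archives additionally feed a catalog - an index of file
            // names and metadata uploaded once at the end - which allows listing
            // and browsing snapshot contents without downloading the archive.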
            BackupSpecificationType::PXAR => {
                // start catalog upload on first use
                if catalog.is_none() {
                    let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?;
                    catalog = Some(catalog_upload_res.catalog_writer);
                    catalog_result_rx = Some(catalog_upload_res.result);
                }
                let catalog = catalog.as_ref().unwrap();

                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;

                let pxar_options = proxmox_backup::pxar::PxarCreateOptions {
                    device_set: devices.clone(),
                    patterns: pattern_list.clone(),
                    entries_max: entries_max as usize,
                    skip_lost_and_found,
                    verbose,
                };

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                    ..UploadOptions::default()
                };

                let stats = backup_directory(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    catalog.clone(),
                    pxar_options,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
                catalog.lock().unwrap().end_directory()?;
            }
            BackupSpecificationType::IMAGE => {
                println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);

                let upload_options = UploadOptions {
                    previous_manifest: previous_manifest.clone(),
                    fixed_size: Some(size),
                    compress: true,
                    encrypt: crypto.mode == CryptMode::Encrypt,
                };

                let stats = backup_image(
                    &client,
                    &filename,
                    &target,
                    chunk_size_opt,
                    upload_options,
                ).await?;
                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
            }
        }
    }
    // finalize and upload catalog
    if let Some(catalog) = catalog {
        let mutex = Arc::try_unwrap(catalog)
            .map_err(|_| format_err!("unable to get catalog (still used)"))?;
        let mut catalog = mutex.into_inner().unwrap();

        catalog.finish()?;

        drop(catalog); // close upload stream

        if let Some(catalog_result_rx) = catalog_result_rx {
            let stats = catalog_result_rx.await??;
            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
        }
    }
    if let Some(rsa_encrypted_key) = rsa_encrypted_key {
        let target = ENCRYPTED_KEY_BLOB_NAME;
        println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
        let options = UploadOptions { compress: false, encrypt: false, ..UploadOptions::default() };
        let stats = client
            .upload_blob_from_data(rsa_encrypted_key, target, options)
            .await?;
        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
    }
    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

    if verbose { println!("Upload index.json to '{}'", repo) };
    let options = UploadOptions { compress: true, encrypt: false, ..UploadOptions::default() };
    client
        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
        .await?;

    client.finish().await?;

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    println!("Duration: {:.2}s", elapsed.as_secs_f64());

    println!("End Time: {}", strftime_local("%c", epoch_i64())?);

    Ok(Value::Null)
}
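
// Illustrative use of the backup command defined above (hypothetical repository
// value):
//
//     proxmox-backup-client backup root.pxar:/ --repository user@pbs@server:store

// Shell completion helper: suggest "<name>.<type>:<path>" backup source specs.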
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
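
/// Stream the raw contents of a fixed index (disk image) to `writer`,
/// fetching each chunk in order through `RemoteChunkReader`.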
async fn dump_image<W: Write>(
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    crypt_mode: CryptMode,
    index: FixedIndexReader,
    mut writer: W,
    verbose: bool,
) -> Result<(), Error> {

    let most_used = index.find_most_used_chunks(8);

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
    // and thus slows down reading. Instead, directly use RemoteChunkReader.
    let mut per = 0;
    let mut bytes = 0;
    let start_time = std::time::Instant::now();

    for pos in 0..index.index_count() {
        let digest = index.index_digest(pos).unwrap();
        let raw_data = chunk_reader.read_chunk(&digest).await?;
        writer.write_all(&raw_data)?;
        bytes += raw_data.len();
        if verbose {
            let next_per = ((pos+1)*100)/index.index_count();
            if per != next_per {
                eprintln!("progress {}% (read {} bytes, duration {} sec)",
                          next_per, bytes, start_time.elapsed().as_secs());
                per = next_per;
            }
        }
    }

    let end_time = std::time::Instant::now();
    let elapsed = end_time.duration_since(start_time);
    eprintln!("restore image complete (bytes={}, duration={:.2}s, speed={:.2}MB/s)",
              bytes,
              elapsed.as_secs_f64(),
              bytes as f64/(1024.0*1024.0*elapsed.as_secs_f64())
    );

    Ok(())
}
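
/// Map a user-facing archive name to its server file name plus archive type.
///
/// For example (per the rules below): "root.pxar" maps to ("root.pxar.didx",
/// ArchiveType::DynamicIndex), "disk.img" to ("disk.img.fidx",
/// ArchiveType::FixedIndex), and anything else gets a ".blob" suffix.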
fn parse_archive_type(name: &str) -> (String, ArchiveType) {
    if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
        (name.into(), archive_type(name).unwrap())
    } else if name.ends_with(".pxar") {
        (format!("{}.didx", name), ArchiveType::DynamicIndex)
    } else if name.ends_with(".img") {
        (format!("{}.fidx", name), ArchiveType::FixedIndex)
    } else {
        (format!("{}.blob", name), ArchiveType::Blob)
    }
}
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                description: "Backup archive name.",
                type: String,
            },
            target: {
                type: String,
                description: r###"Target directory path. Use '-' to write to standard output.

We do not extract '.pxar' archives when writing to standard output.

"###
            },
            "allow-existing-dirs": {
                type: Boolean,
                description: "Do not fail if directories already exist.",
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
            "crypt-mode": {
                type: CryptMode,
                optional: true,
            },
        }
    }
)]
/// Restore a backup archive from the repository.
async fn restore(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let client = connect(&repo)?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let target = tools::required_string_param(&param, "target")?;
    let target = if target == "-" { None } else { Some(target) };

    let crypto = crypto_parameters(&param)?;

    let crypt_config = match crypto.enc_key {
        None => None,
        Some(key) => {
            let (key, _, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
            eprintln!("Encryption key fingerprint: '{}'", fingerprint);
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };
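
    // Open a read session for the snapshot and fetch its manifest; the
    // fingerprint check below guards against restoring with the wrong key.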
    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let (archive_name, archive_type) = parse_archive_type(archive_name);

    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        eprintln!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
    } else {
        manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
    }

    if archive_name == MANIFEST_BLOB_NAME {
        if let Some(target) = target {
            replace_file(target, &backup_index_data, CreateOptions::new())?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            writer.write_all(&backup_index_data)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

        return Ok(Value::Null);
    }
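
    // Everything else is dispatched on the archive type: blobs are copied
    // verbatim, dynamic indexes are extracted as pxar archives (or piped raw),
    // fixed indexes are dumped as disk images via dump_image().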
    let file_info = manifest.lookup_file_info(&archive_name)?;

    if archive_type == ArchiveType::Blob {

        let mut reader = client.download_blob(&manifest, &archive_name).await?;

        if let Some(target) = target {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
            std::io::copy(&mut reader, &mut writer)?;
        } else {
            let stdout = std::io::stdout();
            let mut writer = stdout.lock();
            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }

    } else if archive_type == ArchiveType::DynamicIndex {

        let index = client.download_dynamic_index(&manifest, &archive_name).await?;

        let most_used = index.find_most_used_chunks(8);

        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

        let mut reader = BufferedDynamicReader::new(index, chunk_reader);

        let options = proxmox_backup::pxar::PxarExtractOptions {
            match_list: &[],
            extract_match_default: true,
            allow_existing_dirs,
            on_error: None,
        };

        if let Some(target) = target {
            proxmox_backup::pxar::extract_archive(
                pxar::decoder::Decoder::from_std(reader)?,
                Path::new(target),
                proxmox_backup::pxar::Flags::DEFAULT,
                |path| {
                    if verbose {
                        println!("{:?}", path);
                    }
                },
                options,
            )
            .map_err(|err| format_err!("error extracting archive - {}", err))?;
        } else {
            let mut writer = std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?;

            std::io::copy(&mut reader, &mut writer)
                .map_err(|err| format_err!("unable to pipe data - {}", err))?;
        }
    } else if archive_type == ArchiveType::FixedIndex {

        let index = client.download_fixed_index(&manifest, &archive_name).await?;

        let mut writer = if let Some(target) = target {
            std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .create_new(true)
                .open(target)
                .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?
        } else {
            std::fs::OpenOptions::new()
                .write(true)
                .open("/dev/stdout")
                .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
        };

        dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
    }

    Ok(Value::Null)
}
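
// This ApiMethod is written out by hand, presumably because the #[api] attribute
// macro cannot splice in the shared add_common_prune_prameters! schema list.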
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&prune),
    &ObjectSchema::new(
        "Prune a backup repository.",
        &proxmox_backup::add_common_prune_prameters!([
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()),
            ("group", false, &StringSchema::new("Backup group.").schema()),
        ], [
            ("output-format", true, &OUTPUT_FORMAT),
            (
                "quiet",
                true,
                &BooleanSchema::new("Minimal output - only show removals.")
                    .schema()
            ),
            ("repository", true, &REPO_URL_SCHEMA),
        ])
    )
);

fn prune<'a>(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &'a mut dyn RpcEnvironment,
) -> proxmox::api::ApiFuture<'a> {
    async move {
        prune_async(param).await
    }.boxed()
}
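
// Illustrative prune invocation (hypothetical group; the keep/dry-run options
// come from the common prune parameter set):
//
//     proxmox-backup-client prune vm/100 --keep-last 3 --dry-run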
async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;

    let mut client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    let group = tools::required_string_param(&param, "group")?;
    let group: BackupGroup = group.parse()?;

    let output_format = get_output_format(&param);

    let quiet = param["quiet"].as_bool().unwrap_or(false);

    param.as_object_mut().unwrap().remove("repository");
    param.as_object_mut().unwrap().remove("group");
    param.as_object_mut().unwrap().remove("output-format");
    param.as_object_mut().unwrap().remove("quiet");

    param["backup-type"] = group.backup_type().into();
    param["backup-id"] = group.backup_id().into();

    let mut result = client.post(&path, Some(param)).await?;

    record_repository(&repo);

    let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
        let item: PruneListItem = serde_json::from_value(record.to_owned())?;
        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
    };

    let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
        Ok(match v.as_bool() {
            Some(true) => "keep",
            Some(false) => "remove",
            None => "unknown",
        }.to_string())
    };

    let options = default_table_format_options()
        .sortby("backup-type", false)
        .sortby("backup-id", false)
        .sortby("backup-time", false)
        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
        .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
        .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
        ;

    let return_type = &proxmox_backup::api2::admin::datastore::API_METHOD_PRUNE.returns;

    let mut data = result["data"].take();

    if quiet {
        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
            item["keep"].as_bool() == Some(false)
        }).cloned().collect();
        data = list.into();
    }

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}
#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let output_format = get_output_format(&param);

    let client = connect(&repo)?;

    let path = format!("api2/json/admin/datastore/{}/status", repo.store());

    let mut result = client.get(&path, None).await?;
    let mut data = result["data"].take();

    record_repository(&repo);
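
    // Render "<bytes> (<percent> %)"; adding total/200 (i.e. half a percent)
    // before the integer division rounds to the nearest percent.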
    let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
        let v = v.as_u64().unwrap();
        let total = record["total"].as_u64().unwrap();
        let roundup = total/200;
        let per = ((v+roundup)*100)/total;
        let info = format!(" ({} %)", per);
        Ok(format!("{} {:>8}", v, info))
    };

    let options = default_table_format_options()
        .noheader(true)
        .column(ColumnConfig::new("total").renderer(render_total_percentage))
        .column(ColumnConfig::new("used").renderer(render_total_percentage))
        .column(ColumnConfig::new("avail").renderer(render_total_percentage));

    let return_type = &API_METHOD_STATUS.returns;

    format_and_print_result_full(&mut data, return_type, &output_format, &options);

    Ok(Value::Null)
}
// like get, but simply ignore errors and return Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
    let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();

    // ticket cache, but no questions asked
    let options = HttpClientOptions::new_interactive(password, fingerprint)
        .interactive(false);

    let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    let mut resp = match client.get(url, None).await {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}
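
// The completion helpers below are called from a synchronous (readline) context,
// so each sync wrapper drives its async body with runtime::main; errors are
// swallowed on purpose - shell completion should never fail loudly.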
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}

async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}

async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group_do(param).await;
        let mut result = vec![];
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}

async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
                }
            }
        }
    }

    result
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}

async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot: BackupDir = match param.get("snapshot") {
        Some(path) => {
            match path.parse() {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path).await;

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item["filename"].as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    result
}
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .map(|v| tools::format::strip_server_file_extension(&v))
        .collect()
}

pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|name| {
            if name.ends_with(".pxar.didx") {
                Some(tools::format::strip_server_file_extension(name))
            } else {
                None
            }
        })
        .collect()
}

pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|name| {
            if name.ends_with(".img.fidx") {
                Some(tools::format::strip_server_file_extension(name))
            } else {
                None
            }
        })
        .collect()
}
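
/// Suggest power-of-two values (64 up to 4096) for the chunk-size parameter.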
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
    proxmox_backup::tools::runtime::main(async { complete_auth_id_do(param).await })
}

async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repository_from_map(param) {
        Some(v) => v,
        _ => return result,
    };

    let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;

    if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
        for user in parsed {
            result.push(user.userid.to_string());
            for token in user.tokens {
                result.push(token.tokenid.to_string());
            }
        }
    };

    result
}
use proxmox_backup::client::RemoteChunkReader;

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
pub struct BufferedDynamicReadAt {
    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
}

impl BufferedDynamicReadAt {
    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
        Self {
            inner: Mutex::new(inner),
        }
    }
}

impl ReadAt for BufferedDynamicReadAt {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("BufferedDynamicReadAt::start_read_at returned Pending");
    }
}
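
// Minimal usage sketch (hypothetical, mirroring how the catalog/mount code hands
// an HTTP-backed reader to the pxar accessor, which wants `ReadAt` plus a size):
//
//     let reader = BufferedDynamicReader::new(index, chunk_reader);
//     let archive_size = reader.archive_size();
//     let reader = BufferedDynamicReadAt::new(reader);
//     // pxar::accessor::aio::Accessor::new(reader, archive_size).await?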
fn main() {

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("keyfile", tools::complete_file_name)
        .completion_cb("master-pubkey-file", tools::complete_file_name)
        .completion_cb("chunk-size", complete_chunk_size);

    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", complete_repository)
        .completion_cb("keyfile", tools::complete_file_name);

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
        .completion_cb("repository", complete_repository);

    let garbage_collect_cmd_def = CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(&API_METHOD_RESTORE)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
        .arg_param(&["group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS)
        .completion_cb("repository", complete_repository);

    let login_cmd_def = CliCommand::new(&API_METHOD_API_LOGIN)
        .completion_cb("repository", complete_repository);

    let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
        .completion_cb("repository", complete_repository);

    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
        .completion_cb("repository", complete_repository);

    let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
        .arg_param(&["group", "new-owner"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("new-owner", complete_auth_id)
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup", backup_cmd_def)
        .insert("garbage-collect", garbage_collect_cmd_def)
        .insert("list", list_cmd_def)
        .insert("login", login_cmd_def)
        .insert("logout", logout_cmd_def)
        .insert("prune", prune_cmd_def)
        .insert("restore", restore_cmd_def)
        .insert("snapshot", snapshot_mgtm_cli())
        .insert("status", status_cmd_def)
        .insert("key", key::cli())
        .insert("mount", mount_cmd_def())
        // map/unmap expose fixed-index .img backups as local loop devices via a
        // FUSE-backed temp file (/dev/loopX -> FUSE file -> backup client -> PBS)
        .insert("map", map_cmd_def())
        .insert("unmap", unmap_cmd_def())
        .insert("catalog", catalog_mgmt_cli())
        .insert("task", task_mgmt_cli())
        .insert("version", version_cmd_def)
        .insert("benchmark", benchmark_cmd_def)
        .insert("change-owner", change_owner_cmd_def)

        .alias(&["files"], &["snapshot", "files"])
        .alias(&["forget"], &["snapshot", "forget"])
        .alias(&["upload-log"], &["snapshot", "upload-log"])
        .alias(&["snapshots"], &["snapshot", "list"])
        ;

    let rpcenv = CliEnvironment::new();
    run_cli_command(cmd_def, rpcenv, Some(|future| {
        proxmox_backup::tools::runtime::main(future)
    }));
}