use const api definitions
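
The change below replaces the `router()` factory functions and `api_method_*()` constructors with `const` definitions built from `&'static` references. As a minimal, self-contained sketch of why that works — using simplified stand-in types, not the real proxmox api_schema API — `const fn` builders over `'static` data let a whole API description be evaluated at compile time:

// Builder methods declared as `const fn`, operating on &'static data,
// allow the whole API description to be evaluated at compile time
// instead of being re-allocated (behind Arc) on every router() call.
// All type names here are illustrative stand-ins.
pub struct StringSchema {
    pub description: &'static str,
    pub max_length: Option<usize>,
}

impl StringSchema {
    pub const fn new(description: &'static str) -> Self {
        Self { description, max_length: None }
    }
    pub const fn max_length(mut self, len: usize) -> Self {
        self.max_length = Some(len);
        self
    }
    pub const fn schema(self) -> Schema {
        Schema::String(self)
    }
}

pub enum Schema {
    String(StringSchema),
}

// (name, optional, schema) property tuples, mirroring the slice-based
// parameter lists this commit introduces.
pub type PropertyMap = &'static [(&'static str, bool, &'static Schema)];

pub struct ObjectSchema {
    pub description: &'static str,
    pub properties: PropertyMap,
}

impl ObjectSchema {
    pub const fn new(description: &'static str, properties: PropertyMap) -> Self {
        Self { description, properties }
    }
}

// The full definition is a compile-time constant; routers can hold
// `&'static` references to it with no lazy_static! or runtime setup.
const USERNAME_SCHEMA: Schema = StringSchema::new("User name.").max_length(64).schema();

pub const CREATE_TICKET_SCHEMA: ObjectSchema = ObjectSchema::new(
    "Create or verify authentication ticket.",
    &[("username", false, &USERNAME_SCHEMA)],
);

fn main() {
    println!("{}", CREATE_TICKET_SCHEMA.description);
}
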
@@ -66,31 +66,57 @@ fn create_ticket(
}
}

pub fn router() -> Router {
Router::new()
.subdir(
"ticket",
Router::new()
.post(
ApiMethod::new(
create_ticket,
ObjectSchema::new("Create or verify authentication ticket.")
.required(
const SUBDIRS: SubdirMap = &[
(
"ticket", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&create_ticket),
&ObjectSchema::new(
"Create or verify authentication ticket.",
&[
(
"username",
StringSchema::new("User name.")
false,
&StringSchema::new("User name.")
.max_length(64)
)
.required(
.schema()
),
(
"password",
StringSchema::new("The secret password. This can also be a valid ticket.")
)
).returns(
ObjectSchema::new("Returns authentication ticket with additional infos.")
.required("username", StringSchema::new("User name."))
.required("ticket", StringSchema::new("Auth ticket."))
.required("CSRFPreventionToken", StringSchema::new("Cross Site Request Forgery Prevention Token."))
).protected(true)
)
)
.list_subdirs()
}
false,
&StringSchema::new("The secret password. This can also be a valid ticket.")
.schema()
),
],
)
).returns(
&ObjectSchema::new(
"Returns authentication ticket with additional infos.",
&[
(
"username",
false,
&StringSchema::new("User name.").schema()
),
(
"ticket",
false,
&StringSchema::new("Auth ticket.").schema()
),
(
"CSRFPreventionToken",
false,
&StringSchema::new("Cross Site Request Forgery Prevention Token.")
.schema()
),
],
).schema()
).protected(true)
)
)
];

pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
@@ -2,8 +2,10 @@ use crate::api_schema::router::*;

pub mod datastore;

pub fn router() -> Router {
Router::new()
.subdir("datastore", datastore::router())
.list_subdirs()
}
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER)
];

pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
@@ -10,7 +10,6 @@ use serde_json::{json, Value};
use std::collections::{HashSet, HashMap};
use chrono::{DateTime, Datelike, TimeZone, Local};
use std::path::PathBuf;
use std::sync::Arc;

use proxmox::tools::{try_block, fs::file_get_contents, fs::file_set_contents};
@@ -237,19 +236,61 @@ fn status(
}))
}

fn api_method_status() -> ApiMethod {
ApiMethod::new(
status,
add_common_prune_prameters(
ObjectSchema::new("Get datastore status.")
.required(
"store",
StringSchema::new("Datastore name.")
)
)
)
#[macro_export]
macro_rules! add_common_prune_prameters {
($( $list:tt )*) => {
[
(
"keep-last",
true,
&IntegerSchema::new("Number of backups to keep.")
.minimum(1)
.schema()
),
(
"keep-daily",
true,
&IntegerSchema::new("Number of daily backups to keep.")
.minimum(1)
.schema()
),
(
"keep-weekly",
true,
&IntegerSchema::new("Number of weekly backups to keep.")
.minimum(1)
.schema()
),
(
"keep-monthly",
true,
&IntegerSchema::new("Number of monthly backups to keep.")
.minimum(1)
.schema()
),
(
"keep-yearly",
true,
&IntegerSchema::new("Number of yearly backups to keep.")
.minimum(1)
.schema()
),
$( $list )*
]
}
}

const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&status),
&ObjectSchema::new(
"Get datastore status.",
&add_common_prune_prameters!(
("store", false, &StringSchema::new("Datastore name.").schema()),
),
)
);

fn prune(
param: Value,
_info: &ApiMethod,
@@ -341,50 +382,17 @@ fn prune(
Ok(json!(null))
}

pub fn add_common_prune_prameters(schema: ObjectSchema) -> ObjectSchema {

schema
.optional(
"keep-last",
IntegerSchema::new("Number of backups to keep.")
.minimum(1)
)
.optional(
"keep-daily",
IntegerSchema::new("Number of daily backups to keep.")
.minimum(1)
)
.optional(
"keep-weekly",
IntegerSchema::new("Number of weekly backups to keep.")
.minimum(1)
)
.optional(
"keep-monthly",
IntegerSchema::new("Number of monthly backups to keep.")
.minimum(1)
)
.optional(
"keep-yearly",
IntegerSchema::new("Number of yearly backups to keep.")
.minimum(1)
)
}

fn api_method_prune() -> ApiMethod {
ApiMethod::new(
prune,
add_common_prune_prameters(
ObjectSchema::new("Prune the datastore.")
.required(
"store",
StringSchema::new("Datastore name.")
)
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&prune),
&ObjectSchema::new(
"Prune the datastore.",
&add_common_prune_prameters!(
("store", false, &StringSchema::new("Datastore name.").schema()),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
)
)
}
);

fn start_garbage_collection(
param: Value,
@@ -410,13 +418,13 @@ fn start_garbage_collection(
Ok(json!(upid_str))
}

pub fn api_method_start_garbage_collection() -> ApiMethod {
ApiMethod::new(
start_garbage_collection,
ObjectSchema::new("Start garbage collection.")
.required("store", StringSchema::new("Datastore name."))
pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&start_garbage_collection),
&ObjectSchema::new(
"Start garbage collection.",
&[ ("store", false, &StringSchema::new("Datastore name.").schema()) ]
)
}
);

fn garbage_collection_status(
param: Value,
@@ -435,13 +443,13 @@ fn garbage_collection_status(
Ok(serde_json::to_value(&status)?)
}

pub fn api_method_garbage_collection_status() -> ApiMethod {
ApiMethod::new(
garbage_collection_status,
ObjectSchema::new("Garbage collection status.")
.required("store", StringSchema::new("Datastore name."))
pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&garbage_collection_status),
&ObjectSchema::new(
"Garbage collection status.",
&[ ("store", false, &StringSchema::new("Datastore name.").schema()) ]
)
}
);

fn get_datastore_list(
_param: Value,
@@ -459,7 +467,7 @@ fn download_file(
_parts: Parts,
_req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -500,23 +508,28 @@ fn download_file(
Ok(Box::new(response_future))
}

pub fn api_method_download_file() -> ApiAsyncMethod {
ApiAsyncMethod::new(
download_file,
ObjectSchema::new("Download single raw file from backup snapshot.")
.required("store", StringSchema::new("Datastore name."))
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
.required("backup-time", BACKUP_TIME_SCHEMA.clone())
.required("file-name", StringSchema::new("Raw file name.").format(FILENAME_FORMAT.clone()))
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&download_file),
&ObjectSchema::new(
"Download single raw file from backup snapshot.",
&[
("store", false, &StringSchema::new("Datastore name.").schema()),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
("file-name", false, &StringSchema::new("Raw file name.")
.format(&FILENAME_FORMAT)
.schema()
),
],
)
}
);

fn upload_backup_log(
_parts: Parts,
req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -565,98 +578,122 @@ fn upload_backup_log(
Ok(Box::new(resp))
}

pub fn api_method_upload_backup_log() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upload_backup_log,
ObjectSchema::new("Download single raw file from backup snapshot.")
.required("store", StringSchema::new("Datastore name."))
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
.required("backup-time", BACKUP_TIME_SCHEMA.clone())
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upload_backup_log),
&ObjectSchema::new(
"Download single raw file from backup snapshot.",
&[
("store", false, &StringSchema::new("Datastore name.").schema()),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
],
)
}
);

pub fn router() -> Router {
const STORE_SCHEMA: Schema = StringSchema::new("Datastore name.").schema();

let store_schema: Arc<Schema> = Arc::new(
StringSchema::new("Datastore name.").into()
);

let datastore_info = Router::new()
.subdir(
"download",
Router::new()
.download(api_method_download_file())
)
.subdir(
"upload-backup-log",
Router::new()
.upload(api_method_upload_backup_log())
)
.subdir(
"gc",
Router::new()
.get(api_method_garbage_collection_status())
.post(api_method_start_garbage_collection()))
.subdir(
"files",
Router::new()
.get(
ApiMethod::new(
list_snapshot_files,
ObjectSchema::new("List snapshot files.")
.required("store", store_schema.clone())
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
.required("backup-time", BACKUP_TIME_SCHEMA.clone())
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
(
"download",
&Router::new()
.download(&API_METHOD_DOWNLOAD_FILE)
),
(
"files",
&Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_snapshot_files),
&ObjectSchema::new(
"List snapshot files.",
&[
("store", false, &STORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
],
)
)
)
.subdir(
"groups",
Router::new()
.get(ApiMethod::new(
list_groups,
ObjectSchema::new("List backup groups.")
.required("store", store_schema.clone()))))
.subdir(
"snapshots",
Router::new()
.get(
ApiMethod::new(
list_snapshots,
ObjectSchema::new("List backup groups.")
.required("store", store_schema.clone())
.optional("backup-type", BACKUP_TYPE_SCHEMA.clone())
.optional("backup-id", BACKUP_ID_SCHEMA.clone())
)
),
(
"gc",
&Router::new()
.get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
.post(&API_METHOD_START_GARBAGE_COLLECTION)
),
(
"groups",
&Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_groups),
&ObjectSchema::new(
"List backup groups.",
&[ ("store", false, &STORE_SCHEMA) ],
)
)
.delete(
ApiMethod::new(
delete_snapshots,
ObjectSchema::new("Delete backup snapshot.")
.required("store", store_schema.clone())
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
.required("backup-time", BACKUP_TIME_SCHEMA.clone())
)
)
),
(
"prune",
&Router::new()
.post(&API_METHOD_PRUNE)
),
(
"snapshots",
&Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_snapshots),
&ObjectSchema::new(
"List backup groups.",
&[
("store", false, &STORE_SCHEMA),
("backup-type", true, &BACKUP_TYPE_SCHEMA),
("backup-id", true, &BACKUP_ID_SCHEMA),
],
)
)
)
.subdir(
"prune",
Router::new()
.post(api_method_prune())
)
.subdir(
"status",
Router::new()
.get(api_method_status())
)
.list_subdirs();
)
.delete(
&ApiMethod::new(
&ApiHandler::Sync(&delete_snapshots),
&ObjectSchema::new(
"Delete backup snapshot.",
&[
("store", false, &STORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
],
)
)
)
),
(
"status",
&Router::new()
.get(&API_METHOD_STATUS)
),
(
"upload-backup-log",
&Router::new()
.upload(&API_METHOD_UPLOAD_BACKUP_LOG)
),
];

Router::new()
.get(ApiMethod::new(
get_datastore_list,
ObjectSchema::new("Directory index.")))
.match_all("store", datastore_info)
}
const DATASTORE_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
.subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_datastore_list),
&ObjectSchema::new("Directory index.", &[])
)
)
.match_all("store", &DATASTORE_INFO_ROUTER);
@@ -1,7 +1,4 @@
use failure::*;
use lazy_static::lazy_static;

//use std::sync::Arc;

use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
@@ -24,28 +21,28 @@ use environment::*;
mod upload_chunk;
use upload_chunk::*;

pub fn router() -> Router {
Router::new()
.upgrade(api_method_upgrade_backup())
}
pub const ROUTER: Router = Router::new()
.upgrade(&API_METHOD_UPGRADE_BACKUP);

pub fn api_method_upgrade_backup() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upgrade_to_backup_protocol,
ObjectSchema::new(concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."))
.required("store", StringSchema::new("Datastore name."))
.required("backup-type", BACKUP_TYPE_SCHEMA.clone())
.required("backup-id", BACKUP_ID_SCHEMA.clone())
.required("backup-time", BACKUP_TIME_SCHEMA.clone())
.optional("debug", BooleanSchema::new("Enable verbose debug logging."))
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upgrade_to_backup_protocol),
&ObjectSchema::new(
concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_PROTOCOL_ID_V1!(), "')."),
&[
("store", false, &StringSchema::new("Datastore name.").schema()),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
],
)
}
);

fn upgrade_to_backup_protocol(
parts: Parts,
req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -99,7 +96,7 @@ fn upgrade_to_backup_protocol(

env.log(format!("starting new backup on datastore '{}': {:?}", store, path));

let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_ROUTER, debug);
let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

let abort_future = worker.abort_future();
@@ -162,67 +159,67 @@ fn upgrade_to_backup_protocol(
Ok(Box::new(futures::future::ok(response)))
}

lazy_static!{
static ref BACKUP_ROUTER: Router = backup_api();
}

pub fn backup_api() -> Router {
Router::new()
.subdir(
"blob", Router::new()
.upload(api_method_upload_blob())
)
.subdir(
"dynamic_chunk", Router::new()
.upload(api_method_upload_dynamic_chunk())
)
.subdir(
"dynamic_index", Router::new()
.download(api_method_dynamic_chunk_index())
.post(api_method_create_dynamic_index())
.put(api_method_dynamic_append())
)
.subdir(
"dynamic_close", Router::new()
.post(api_method_close_dynamic_index())
)
.subdir(
"fixed_chunk", Router::new()
.upload(api_method_upload_fixed_chunk())
)
.subdir(
"fixed_index", Router::new()
.download(api_method_fixed_chunk_index())
.post(api_method_create_fixed_index())
.put(api_method_fixed_append())
)
.subdir(
"fixed_close", Router::new()
.post(api_method_close_fixed_index())
)
.subdir(
"finish", Router::new()
.post(
ApiMethod::new(
finish_backup,
ObjectSchema::new("Mark backup as finished.")
)
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
(
"blob", &Router::new()
.upload(&API_METHOD_UPLOAD_BLOB)
),
(
"dynamic_chunk", &Router::new()
.upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
),
(
"dynamic_close", &Router::new()
.post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
),
(
"dynamic_index", &Router::new()
.download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
.post(&API_METHOD_CREATE_DYNAMIC_INDEX)
.put(&API_METHOD_DYNAMIC_APPEND)
),
(
"finish", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&finish_backup),
&ObjectSchema::new("Mark backup as finished.", &[])
)
)
.subdir(
"speedtest", Router::new()
.upload(api_method_upload_speedtest())
)
.list_subdirs()
}
)
),
(
"fixed_chunk", &Router::new()
.upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
),
(
"fixed_close", &Router::new()
.post(&API_METHOD_CLOSE_FIXED_INDEX)
),
(
"fixed_index", &Router::new()
.download(&API_METHOD_FIXED_CHUNK_INDEX)
.post(&API_METHOD_CREATE_FIXED_INDEX)
.put(&API_METHOD_FIXED_APPEND)
),
(
"speedtest", &Router::new()
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
),
];

pub fn api_method_create_dynamic_index() -> ApiMethod {
ApiMethod::new(
create_dynamic_index,
ObjectSchema::new("Create dynamic chunk index file.")
.required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
pub const BACKUP_API_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(BACKUP_API_SUBDIRS))
.subdirs(BACKUP_API_SUBDIRS);

pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&create_dynamic_index),
&ObjectSchema::new(
"Create dynamic chunk index file.",
&[
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
],
)
}
);

fn create_dynamic_index(
param: Value,
@@ -250,16 +247,19 @@ fn create_dynamic_index(
Ok(json!(wid))
}

pub fn api_method_create_fixed_index() -> ApiMethod {
ApiMethod::new(
create_fixed_index,
ObjectSchema::new("Create fixed chunk index file.")
.required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
.required("size", IntegerSchema::new("File size.")
.minimum(1)
)
pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&create_fixed_index),
&ObjectSchema::new(
"Create fixed chunk index file.",
&[
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
("size", false, &IntegerSchema::new("File size.")
.minimum(1)
.schema()
),
],
)
}
);

fn create_fixed_index(
param: Value,
@@ -292,25 +292,37 @@ fn create_fixed_index(
Ok(json!(wid))
}

pub fn api_method_dynamic_append() -> ApiMethod {
ApiMethod::new(
dynamic_append,
ObjectSchema::new("Append chunk to dynamic index writer.")
.required("wid", IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
)
.required("digest-list", ArraySchema::new(
"Chunk digest list.", CHUNK_DIGEST_SCHEMA.clone())
)
.required("offset-list", ArraySchema::new(
"Chunk offset list.",
IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.into())
)
pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&dynamic_append),
&ObjectSchema::new(
"Append chunk to dynamic index writer.",
&[
(
"wid",
false,
&IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
(
"digest-list",
false,
&ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
),
(
"offset-list",
false,
&ArraySchema::new(
"Chunk offset list.",
&IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.schema()
).schema()
),
],
)
}
);

fn dynamic_append (
param: Value,
@@ -344,25 +356,37 @@ fn dynamic_append (
Ok(Value::Null)
}

pub fn api_method_fixed_append() -> ApiMethod {
ApiMethod::new(
fixed_append,
ObjectSchema::new("Append chunk to fixed index writer.")
.required("wid", IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
)
.required("digest-list", ArraySchema::new(
"Chunk digest list.", CHUNK_DIGEST_SCHEMA.clone())
)
.required("offset-list", ArraySchema::new(
"Chunk offset list.",
IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.into())
pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&fixed_append),
&ObjectSchema::new(
"Append chunk to fixed index writer.",
&[
(
"wid",
false,
&IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
(
"digest-list",
false,
&ArraySchema::new("Chunk digest list.", &CHUNK_DIGEST_SCHEMA).schema()
),
(
"offset-list",
false,
&ArraySchema::new(
"Chunk offset list.",
&IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.schema()
).schema()
)
],
)
}
);

fn fixed_append (
param: Value,
@@ -396,23 +420,37 @@ fn fixed_append (
Ok(Value::Null)
}

pub fn api_method_close_dynamic_index() -> ApiMethod {
ApiMethod::new(
close_dynamic_index,
ObjectSchema::new("Close dynamic index writer.")
.required("wid", IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
)
.required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
.minimum(1)
)
.required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
.minimum(1)
)
.required("csum", StringSchema::new("Digest list checksum."))
pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&close_dynamic_index),
&ObjectSchema::new(
"Close dynamic index writer.",
&[
(
"wid",
false,
&IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
(
"chunk-count",
false,
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
.minimum(1)
.schema()
),
(
"size",
false,
&IntegerSchema::new("File size. This is used to verify that the server got all data.")
.minimum(1)
.schema()
),
("csum", false, &StringSchema::new("Digest list checksum.").schema()),
],
)
}
);

fn close_dynamic_index (
param: Value,
@@ -435,23 +473,37 @@ fn close_dynamic_index (
Ok(Value::Null)
}

pub fn api_method_close_fixed_index() -> ApiMethod {
ApiMethod::new(
close_fixed_index,
ObjectSchema::new("Close fixed index writer.")
.required("wid", IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
)
.required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
.minimum(1)
)
.required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
.minimum(1)
)
.required("csum", StringSchema::new("Digest list checksum."))
pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&close_fixed_index),
&ObjectSchema::new(
"Close fixed index writer.",
&[
(
"wid",
false,
&IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
(
"chunk-count",
false,
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
.minimum(1)
.schema()
),
(
"size",
false,
&IntegerSchema::new("File size. This is used to verify that the server got all data.")
.minimum(1)
.schema()
),
("csum", false, &StringSchema::new("Digest list checksum.").schema()),
],
)
}
);

fn close_fixed_index (
param: Value,
@@ -488,23 +540,22 @@ fn finish_backup (
Ok(Value::Null)
}

pub fn api_method_dynamic_chunk_index() -> ApiAsyncMethod {
ApiAsyncMethod::new(
dynamic_chunk_index,
ObjectSchema::new(r###"
pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&dynamic_chunk_index),
&ObjectSchema::new(
r###"
Download the dynamic chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
)
.required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
"### ,
&[ ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA) ],
)
}
);

fn dynamic_chunk_index(
_parts: Parts,
_req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -559,23 +610,22 @@ fn dynamic_chunk_index(
Ok(Box::new(future::ok(response)))
}

pub fn api_method_fixed_chunk_index() -> ApiAsyncMethod {
ApiAsyncMethod::new(
fixed_chunk_index,
ObjectSchema::new(r###"
pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&fixed_chunk_index),
&ObjectSchema::new(
r###"
Download the fixed chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
)
.required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
"### ,
&[ ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA) ],
)
}
);

fn fixed_chunk_index(
_parts: Parts,
_req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -81,31 +81,36 @@ impl Future for UploadChunk {
}
}

pub fn api_method_upload_fixed_chunk() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upload_fixed_chunk,
ObjectSchema::new("Upload a new chunk.")
.required("wid", IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
)
.required("digest", CHUNK_DIGEST_SCHEMA.clone())
.required("size", IntegerSchema::new("Chunk size.")
.minimum(1)
.maximum(1024*1024*16)
)
.required("encoded-size", IntegerSchema::new("Encoded chunk size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize)+1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
)
pub const API_METHOD_UPLOAD_FIXED_CHUNK: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upload_fixed_chunk),
&ObjectSchema::new(
"Upload a new chunk.",
&[
("wid", false, &IntegerSchema::new("Fixed writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
("digest", false, &CHUNK_DIGEST_SCHEMA),
("size", false, &IntegerSchema::new("Chunk size.")
.minimum(1)
.maximum(1024*1024*16)
.schema()
),
("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize)+1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
.schema()
),
],
)
}
);

fn upload_fixed_chunk(
_parts: Parts,
req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -137,31 +142,36 @@ fn upload_fixed_chunk(
Ok(Box::new(resp))
}

pub fn api_method_upload_dynamic_chunk() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upload_dynamic_chunk,
ObjectSchema::new("Upload a new chunk.")
.required("wid", IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
)
.required("digest", CHUNK_DIGEST_SCHEMA.clone())
.required("size", IntegerSchema::new("Chunk size.")
.minimum(1)
.maximum(1024*1024*16)
)
.required("encoded-size", IntegerSchema::new("Encoded chunk size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
)
pub const API_METHOD_UPLOAD_DYNAMIC_CHUNK: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upload_dynamic_chunk),
&ObjectSchema::new(
"Upload a new chunk.",
&[
("wid", false, &IntegerSchema::new("Dynamic writer ID.")
.minimum(1)
.maximum(256)
.schema()
),
("digest", false, &CHUNK_DIGEST_SCHEMA),
("size", false, &IntegerSchema::new("Chunk size.")
.minimum(1)
.maximum(1024*1024*16)
.schema()
),
("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
.schema()
),
],
)
}
);

fn upload_dynamic_chunk(
_parts: Parts,
req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -193,18 +203,16 @@ fn upload_dynamic_chunk(
Ok(Box::new(resp))
}

pub fn api_method_upload_speedtest() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upload_speedtest,
ObjectSchema::new("Test uploadf speed.")
)
}
pub const API_METHOD_UPLOAD_SPEEDTEST: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upload_speedtest),
&ObjectSchema::new("Test upload speed.", &[])
);

fn upload_speedtest(
_parts: Parts,
req_body: Body,
_param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -231,23 +239,26 @@ fn upload_speedtest(
Ok(Box::new(resp))
}

pub fn api_method_upload_blob() -> ApiAsyncMethod {
ApiAsyncMethod::new(
upload_blob,
ObjectSchema::new("Upload binary blob file.")
.required("file-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
.required("encoded-size", IntegerSchema::new("Encoded blob size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
&ApiHandler::Async(&upload_blob),
&ObjectSchema::new(
"Upload binary blob file.",
&[
("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
.minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
.maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
.schema()
)
],
)
}
);

fn upload_blob(
_parts: Parts,
req_body: Body,
param: Value,
_info: &ApiAsyncMethod,
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
@@ -1,13 +1,11 @@
//use failure::*;
//use std::collections::HashMap;

//use crate::api_schema;
use crate::api_schema::router::*;

pub mod datastore;

pub fn router() -> Router {
Router::new()
.subdir("datastore", datastore::router())
.list_subdirs()
}
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER)
];

pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
@@ -9,11 +9,10 @@ use std::path::PathBuf;

use crate::config::datastore;

pub fn get() -> ApiMethod {
ApiMethod::new(
get_datastore_list,
ObjectSchema::new("Directory index."))
}
pub const GET: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&get_datastore_list),
&ObjectSchema::new("Directory index.", &[])
);

fn get_datastore_list(
_param: Value,
@@ -26,14 +25,16 @@ fn get_datastore_list(
Ok(config.convert_to_array("name"))
}

pub fn post() -> ApiMethod {
ApiMethod::new(
create_datastore,
ObjectSchema::new("Create new datastore.")
.required("name", StringSchema::new("Datastore name."))
.required("path", StringSchema::new("Directory path (must exist)."))
)
}
pub const POST: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&create_datastore),
&ObjectSchema::new(
"Create new datastore.",
&[
("name", false, &StringSchema::new("Datastore name.").schema()),
("path", false, &StringSchema::new("Directory path (must exist).").schema()),
],
)
);

fn create_datastore(
param: Value,
@@ -65,12 +66,15 @@ fn create_datastore(
Ok(Value::Null)
}

pub fn delete() -> ApiMethod {
ApiMethod::new(
delete_datastore,
ObjectSchema::new("Remove a datastore configuration.")
.required("name", StringSchema::new("Datastore name.")))
}
pub const DELETE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&delete_datastore),
&ObjectSchema::new(
"Remove a datastore configuration.",
&[
("name", false, &StringSchema::new("Datastore name.").schema()),
],
)
);

fn delete_datastore(
param: Value,
@@ -96,9 +100,7 @@ fn delete_datastore(
Ok(Value::Null)
}

pub fn router() -> Router {
Router::new()
.get(get())
.post(post())
.delete(delete())
}
pub const ROUTER: Router = Router::new()
.get(&GET)
.post(&POST)
.delete(&DELETE);
@@ -7,13 +7,15 @@ mod dns;
mod syslog;
mod services;

pub fn router() -> Router {
Router::new()
.subdir("dns", dns::router())
.subdir("network", network::router())
.subdir("services", services::router())
.subdir("syslog", syslog::router())
.subdir("tasks", tasks::router())
.subdir("time", time::router())
.list_subdirs()
}
pub const SUBDIRS: SubdirMap = &[
("dns", &dns::ROUTER),
("network", &network::ROUTER),
("services", &services::ROUTER),
("syslog", &syslog::ROUTER),
("tasks", &tasks::ROUTER),
("time", &time::ROUTER),
];

pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
.subdirs(SUBDIRS);
@@ -107,32 +107,40 @@ fn get_dns(
read_etc_resolv_conf()
}

pub fn router() -> Router {
Router::new()
.get(
ApiMethod::new(
get_dns,
ObjectSchema::new("Read DNS settings.")
.required("node", NODE_SCHEMA.clone())
).returns(
ObjectSchema::new("Returns DNS server IPs and sreach domain.")
.required("digest", PVE_CONFIG_DIGEST_SCHEMA.clone())
.optional("search", SEARCH_DOMAIN_SCHEMA.clone())
.optional("dns1", FIRST_DNS_SERVER_SCHEMA.clone())
.optional("dns2", SECOND_DNS_SERVER_SCHEMA.clone())
.optional("dns3", THIRD_DNS_SERVER_SCHEMA.clone())
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_dns),
&ObjectSchema::new(
"Read DNS settings.",
&[ ("node", false, &NODE_SCHEMA) ],
)
).returns(
&ObjectSchema::new(
"Returns DNS server IPs and sreach domain.",
&[
("digest", false, &PVE_CONFIG_DIGEST_SCHEMA),
("search", true, &SEARCH_DOMAIN_SCHEMA),
("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
],
).schema()
)
.put(
ApiMethod::new(
update_dns,
ObjectSchema::new("Returns DNS server IPs and sreach domain.")
.required("node", NODE_SCHEMA.clone())
.required("search", SEARCH_DOMAIN_SCHEMA.clone())
.optional("dns1", FIRST_DNS_SERVER_SCHEMA.clone())
.optional("dns2", SECOND_DNS_SERVER_SCHEMA.clone())
.optional("dns3", THIRD_DNS_SERVER_SCHEMA.clone())
.optional("digest", PVE_CONFIG_DIGEST_SCHEMA.clone())
).protected(true)
)
}
)
.put(
&ApiMethod::new(
&ApiHandler::Sync(&update_dns),
&ObjectSchema::new(
"Returns DNS server IPs and sreach domain.",
&[
("node", false, &NODE_SCHEMA),
("search", false, &SEARCH_DOMAIN_SCHEMA),
("dns1", true, &FIRST_DNS_SERVER_SCHEMA),
("dns2", true, &SECOND_DNS_SERVER_SCHEMA),
("dns3", true, &THIRD_DNS_SERVER_SCHEMA),
("digest", true, &PVE_CONFIG_DIGEST_SCHEMA),
],
)
).protected(true)
);
@@ -16,11 +16,14 @@ fn get_network_config(
Ok(json!({}))
}

pub fn router() -> Router {
Router::new()
.get(ApiMethod::new(
get_network_config,
ObjectSchema::new("Read network configuration.")
.required("node", NODE_SCHEMA.clone())
))
}
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_network_config),
&ObjectSchema::new(
"Read network configuration.",
&[ ("node", false, &NODE_SCHEMA) ],
)
)
);
@@ -5,7 +5,6 @@ use crate::api_schema::*;
use crate::api_schema::router::*;
use serde_json::{json, Value};

use std::sync::Arc;
use std::process::{Command, Stdio};

use crate::api2::types::*;
@@ -214,92 +213,115 @@ fn reload_service(
run_service_command(service, "reload")
}

pub fn router() -> Router {

let service_id_schema : Arc<Schema> = Arc::new(
StringSchema::new("Service ID.")
.max_length(256)
.into()
);
const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
.max_length(256)
.schema();

let service_api = Router::new()
.subdir(
"state",
Router::new()
.get(ApiMethod::new(
get_service_state,
ObjectSchema::new("Read service properties.")
.required("node", NODE_SCHEMA.clone())
.required("service", service_id_schema.clone()))
)
)
.subdir(
"start",
Router::new()
.post(
ApiMethod::new(
start_service,
ObjectSchema::new("Start service.")
.required("node", NODE_SCHEMA.clone())
.required("service", service_id_schema.clone())
).protected(true)
)
)
.subdir(
"stop",
Router::new()
.post(
ApiMethod::new(
stop_service,
ObjectSchema::new("Stop service.")
.required("node", NODE_SCHEMA.clone())
.required("service", service_id_schema.clone())
).protected(true)
)
)
.subdir(
"restart",
Router::new()
.post(
ApiMethod::new(
restart_service,
ObjectSchema::new("Restart service.")
.required("node", NODE_SCHEMA.clone())
.required("service", service_id_schema.clone())
).protected(true)
)
)
.subdir(
"reload",
Router::new()
.post(
ApiMethod::new(
reload_service,
ObjectSchema::new("Reload service.")
.required("node", NODE_SCHEMA.clone())
.required("service", service_id_schema.clone())
).protected(true)
)
)
.list_subdirs();

Router::new()
.get(
ApiMethod::new(
list_services,
ObjectSchema::new("Service list.")
.required("node", NODE_SCHEMA.clone())
).returns(
ArraySchema::new(
"Returns a list of systemd services.",
ObjectSchema::new("Service details.")
.required("service", service_id_schema.clone())
.required("name", StringSchema::new("systemd service name."))
.required("desc", StringSchema::new("systemd service description."))
.required("state", StringSchema::new("systemd service 'SubState'."))
.into()
const SERVICE_SUBDIRS: SubdirMap = &[
(
"reload", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&reload_service),
&ObjectSchema::new(
"Reload service.",
&[
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
],
)
).protected(true)
)
),
(
"restart", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&restart_service),
&ObjectSchema::new(
"Restart service.",
&[
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
],
)
).protected(true)
)
),
(
"start", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&start_service),
&ObjectSchema::new(
"Start service.",
&[
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
],
)
).protected(true)
)
),
(
"state", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_service_state),
&ObjectSchema::new(
"Read service properties.",
&[
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
],
)
)
)
),
(
"stop", &Router::new()
.post(
&ApiMethod::new(
&ApiHandler::Sync(&stop_service),
&ObjectSchema::new(
"Stop service.",
&[
("node", false, &NODE_SCHEMA),
("service", false, &SERVICE_ID_SCHEMA),
],
)
).protected(true)
)
),
];

const SERVICE_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SERVICE_SUBDIRS))
.subdirs(SERVICE_SUBDIRS);

pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_services),
&ObjectSchema::new(
"Service list.",
&[ ("node", false, &NODE_SCHEMA) ],
)
).returns(
&ArraySchema::new(
"Returns a list of systemd services.",
&ObjectSchema::new(
"Service details.",
&[
("service", false, &SERVICE_ID_SCHEMA),
("name", false, &StringSchema::new("systemd service name.").schema()),
("desc", false, &StringSchema::new("systemd service description.").schema()),
("state", false, &StringSchema::new("systemd service 'SubState'.").schema()),
],
).schema()
).schema()
)
.match_all("service", service_api)
}
)
.match_all("service", &SERVICE_ROUTER);
@@ -5,10 +5,6 @@ use crate::api_schema::router::*;
use crate::api2::types::*;

use serde_json::{json, Value};

use std::sync::Arc;
use lazy_static::lazy_static;
use proxmox::tools::common_regex;
use std::process::{Command, Stdio};

fn dump_journal(
@@ -91,47 +87,44 @@ fn get_syslog(
Ok(json!(lines))
}

lazy_static! {
pub static ref SYSTEMD_DATETIME_FORMAT: Arc<ApiStringFormat> =
ApiStringFormat::Pattern(&common_regex::SYSTEMD_DATETIME_REGEX).into();
}
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_syslog),
&ObjectSchema::new(
"Read server time and time zone settings.",
&[
("node", false, &NODE_SCHEMA),
("start", true, &IntegerSchema::new("Start line number.")
.minimum(0)
.schema()
),
("limit", true, &IntegerSchema::new("Max. number of lines.")
.minimum(0)
.schema()
),
("since", true, &StringSchema::new("Display all log since this date-time string.")
.format(&SYSTEMD_DATETIME_FORMAT)
.schema()
),
("until", true, &StringSchema::new("Display all log until this date-time string.")
.format(&SYSTEMD_DATETIME_FORMAT)
.schema()
),
("service", true, &StringSchema::new("Service ID.")
.max_length(128)
.schema()
),
],
)
).returns(
&ObjectSchema::new(
"Returns a list of syslog entries.",
&[
("n", false, &IntegerSchema::new("Line number.").schema()),
("t", false, &StringSchema::new("Line text.").schema()),
],
).schema()
).protected(true)
);

pub fn router() -> Router {
Router::new()
.get(
ApiMethod::new(
get_syslog,
ObjectSchema::new("Read server time and time zone settings.")
.required("node", NODE_SCHEMA.clone())
.optional(
"start",
IntegerSchema::new("Start line number.")
.minimum(0)
)
.optional(
"limit",
IntegerSchema::new("Max. number of lines.")
.minimum(0)
)
.optional(
"since",
StringSchema::new("Display all log since this date-time string.")
.format(SYSTEMD_DATETIME_FORMAT.clone())
)
.optional(
"until",
StringSchema::new("Display all log until this date-time string.")
.format(SYSTEMD_DATETIME_FORMAT.clone())
)
.optional(
"service",
StringSchema::new("Service ID.")
.max_length(128)
)
).returns(
ObjectSchema::new("Returns a list of syslog entries.")
.required("n", IntegerSchema::new("Line number."))
.required("t", StringSchema::new("Line text."))
).protected(true)
)
}
@@ -4,7 +4,6 @@ use crate::tools;
use crate::api_schema::*;
use crate::api_schema::router::*;
use serde_json::{json, Value};
use std::sync::Arc;
use std::fs::File;
use std::io::{BufRead,BufReader};
@@ -166,84 +165,91 @@ fn list_tasks(
Ok(json!(result))
}

pub fn router() -> Router {
const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.")
.max_length(256)
.schema();

let upid_schema: Arc<Schema> = Arc::new(
StringSchema::new("Unique Process/Task ID.")
.max_length(256)
.into()
);

let upid_api = Router::new()
.delete(ApiMethod::new(
stop_task,
ObjectSchema::new("Try to stop a task.")
.required("node", NODE_SCHEMA.clone())
.required("upid", upid_schema.clone())).protected(true)

)
.subdir(
"log", Router::new()
.get(
ApiMethod::new(
read_task_log,
ObjectSchema::new("Read task log.")
.required("node", NODE_SCHEMA.clone())
.required("upid", upid_schema.clone())
.optional(
"start",
IntegerSchema::new("Start at this line.")
.minimum(0)
.default(0)
)
.optional(
"limit",
IntegerSchema::new("Only list this amount of lines.")
.minimum(0)
.default(50)
)
const UPID_API_SUBDIRS: SubdirMap = &[
(
"log", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&read_task_log),
&ObjectSchema::new(
"Read task log.",
&[
("node", false, &NODE_SCHEMA),
("upid", false, &UPID_SCHEMA),
("start", true, &IntegerSchema::new("Start at this line.")
.minimum(0)
.default(0)
.schema()
),
("limit", true, &IntegerSchema::new("Only list this amount of lines.")
.minimum(0)
.default(50)
.schema()
),
],
)
)
)
.subdir(
"status", Router::new()
.get(
ApiMethod::new(
get_task_status,
ObjectSchema::new("Get task status.")
.required("node", NODE_SCHEMA.clone())
.required("upid", upid_schema.clone()))
)
),
(
"status", &Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&get_task_status),
&ObjectSchema::new(
"Get task status.",
&[
("node", false, &NODE_SCHEMA),
("upid", false, &UPID_SCHEMA),
],
)
)
)
.list_subdirs();
)
)
];

pub const UPID_API_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(UPID_API_SUBDIRS))
.delete(
&ApiMethod::new(
&ApiHandler::Sync(&stop_task),
&ObjectSchema::new(
"Try to stop a task.",
&[
("node", false, &NODE_SCHEMA),
("upid", false, &UPID_SCHEMA),
],
)
).protected(true)
)
.subdirs(&UPID_API_SUBDIRS);

Router::new()
.get(ApiMethod::new(
list_tasks,
ObjectSchema::new("List tasks.")
.required("node", NODE_SCHEMA.clone())
.optional(
"start",
IntegerSchema::new("List tasks beginning from this offset.")
.minimum(0)
.default(0)
)
.optional(
"limit",
IntegerSchema::new("Only list this amount of tasks.")
.minimum(0)
.default(50)
)
.optional(
"errors",
BooleanSchema::new("Only list erroneous tasks.")
)
.optional(
"userfilter",
StringSchema::new("Only list tasks from this user.")
)
)
pub const ROUTER: Router = Router::new()
.get(
&ApiMethod::new(
&ApiHandler::Sync(&list_tasks),
&ObjectSchema::new(
"List tasks.",
&[
("node", false, &NODE_SCHEMA),
("start", true, &IntegerSchema::new("List tasks beginning from this offset.")
.minimum(0)
.default(0)
.schema()
),
("limit", true, &IntegerSchema::new("Only list this amount of tasks.")
.minimum(0)
.default(50)
.schema()
),
("errors", true, &BooleanSchema::new("Only list erroneous tasks.").schema()),
("userfilter", true, &StringSchema::new("Only list tasks from this user.").schema()),
],
)
)
.match_all("upid", upid_api)
}
)
.match_all("upid", &UPID_API_ROUTER);
@ -80,29 +80,44 @@ fn set_timezone(
    Ok(Value::Null)
}

pub fn router() -> Router {
    Router::new()
        .get(
            ApiMethod::new(
                get_time,
                ObjectSchema::new("Read server time and time zone settings.")
                    .required("node", NODE_SCHEMA.clone())
            ).returns(
                ObjectSchema::new("Returns server time and timezone.")
                    .required("timezone", StringSchema::new("Time zone"))
                    .required("time", IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.")
                        .minimum(1_297_163_644))
                    .required("localtime", IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC. (local time)")
                        .minimum(1_297_163_644))
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_time),
            &ObjectSchema::new(
                "Read server time and time zone settings.",
                &[ ("node", false, &NODE_SCHEMA) ],
            )
        ).returns(
            &ObjectSchema::new(
                "Returns server time and timezone.",
                &[
                    ("timezone", false, &StringSchema::new("Time zone").schema()),
                    ("time", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.")
                        .minimum(1_297_163_644)
                        .schema()
                    ),
                    ("localtime", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC. (local time)")
                        .minimum(1_297_163_644)
                        .schema()
                    ),
                ],
            ).schema()
    )
    .put(
        ApiMethod::new(
            set_timezone,
            ObjectSchema::new("Set time zone.")
                .required("node", NODE_SCHEMA.clone())
                .required("timezone", StringSchema::new(
                    "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names."))
        ).protected(true).reload_timezone(true)
    )
}
    )
    .put(
        &ApiMethod::new(
            &ApiHandler::Sync(&set_timezone),
            &ObjectSchema::new(
                "Set time zone.",
                &[
                    ("node", false, &NODE_SCHEMA),
                    ("timezone", false, &StringSchema::new(
                        "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
                        .schema()
                    ),
                ],
            )
        ).protected(true).reload_timezone(true)
    );

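// Hedged sketch (not part of this commit): `.returns()` now takes a
// `&'static Schema`, so a return type is declared by building an
// `ObjectSchema` const and converting it with `.schema()`, exactly as
// above. `EXAMPLE_RETURN_SCHEMA` is an illustrative name:
pub const EXAMPLE_RETURN_SCHEMA: Schema = ObjectSchema::new(
    "Returns a single timestamp.",
    &[
        ("time", false, &IntegerSchema::new("Seconds since 1970-01-01 00:00:00 UTC.").schema()),
    ],
).schema();
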
@ -1,7 +1,4 @@
use failure::*;
use lazy_static::lazy_static;

use std::sync::Arc;

use futures::*;
use hyper::header::{self, HeaderValue, UPGRADE};
@ -21,30 +18,34 @@ use crate::api2::types::*;
mod environment;
use environment::*;

pub fn router() -> Router {
    Router::new()
        .upgrade(api_method_upgrade_backup())
}
pub const ROUTER: Router = Router::new()
    .upgrade(&API_METHOD_UPGRADE_BACKUP);

pub fn api_method_upgrade_backup() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upgrade_to_backup_reader_protocol,
        ObjectSchema::new(concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."))
            .required("store", StringSchema::new("Datastore name."))
            .required("backup-type", StringSchema::new("Backup type.")
                .format(Arc::new(ApiStringFormat::Enum(&["vm", "ct", "host"]))))
            .required("backup-id", StringSchema::new("Backup ID."))
            .required("backup-time", IntegerSchema::new("Backup time (Unix epoch.)")
                .minimum(1_547_797_308))
            .optional("debug", BooleanSchema::new("Enable verbose debug logging."))
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&upgrade_to_backup_reader_protocol),
    &ObjectSchema::new(
        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
        &[
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &StringSchema::new("Backup type.")
                .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
                .schema()
            ),
            ("backup-id", false, &StringSchema::new("Backup ID.").schema()),
            ("backup-time", false, &IntegerSchema::new("Backup time (Unix epoch.)")
                .minimum(1_547_797_308)
                .schema()
            ),
            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
        ],
    )
}
);

fn upgrade_to_backup_reader_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

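// Hedged sketch (not part of this commit): because schemas and formats are
// now consts, the `Arc::new(...)` wrapping on the old side disappears — a
// format is borrowed directly in const context. `EXAMPLE_MODE_SCHEMA` is an
// illustrative name:
pub const EXAMPLE_MODE_SCHEMA: Schema = StringSchema::new("Example mode.")
    .format(&ApiStringFormat::Enum(&["fast", "safe"]))
    .schema();
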
@ -89,7 +90,7 @@ fn upgrade_to_backup_reader_protocol(

    env.log(format!("starting new backup reader datastore '{}': {:?}", store, path));

    let service = H2Service::new(env.clone(), worker.clone(), &READER_ROUTER, debug);
    let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);

    let abort_future = worker.abort_future();

@ -134,39 +135,35 @@ fn upgrade_to_backup_reader_protocol(
    Ok(Box::new(futures::future::ok(response)))
}

lazy_static!{
    static ref READER_ROUTER: Router = reader_api();
}
pub const READER_API_ROUTER: Router = Router::new()
    .subdirs(&[
        (
            "chunk", &Router::new()
                .download(&API_METHOD_DOWNLOAD_CHUNK)
        ),
        (
            "download", &Router::new()
                .download(&API_METHOD_DOWNLOAD_FILE)
        ),
        (
            "speedtest", &Router::new()
                .download(&API_METHOD_SPEEDTEST)
        ),
    ]);

pub fn reader_api() -> Router {
    Router::new()
        .subdir(
            "chunk", Router::new()
                .download(api_method_download_chunk())
        )
        .subdir(
            "download", Router::new()
                .download(api_method_download_file())
        )
        .subdir(
            "speedtest", Router::new()
                .download(api_method_speedtest())
        )
}

pub fn api_method_download_file() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        download_file,
        ObjectSchema::new("Download specified file.")
            .required("file-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&download_file),
    &ObjectSchema::new(
        "Download specified file.",
        &[ ("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA) ],
    )
}
);

fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

@ -202,19 +199,19 @@ fn download_file(
    Ok(Box::new(response_future))
}

pub fn api_method_download_chunk() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        download_chunk,
        ObjectSchema::new("Download specified chunk.")
            .required("digest", CHUNK_DIGEST_SCHEMA.clone())
pub const API_METHOD_DOWNLOAD_CHUNK: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&download_chunk),
    &ObjectSchema::new(
        "Download specified chunk.",
        &[ ("digest", false, &CHUNK_DIGEST_SCHEMA) ],
    )
}
);

fn download_chunk(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

@ -250,7 +247,7 @@ fn download_chunk_old(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

@ -286,18 +283,16 @@ fn download_chunk_old(
}
*/

pub fn api_method_speedtest() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        speedtest,
        ObjectSchema::new("Test 4M block download speed.")
    )
}
pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&speedtest),
    &ObjectSchema::new("Test 4M block download speed.", &[])
);

fn speedtest(
    _parts: Parts,
    _req_body: Body,
    _param: Value,
    _info: &ApiAsyncMethod,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

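// Hedged sketch (not part of this commit): an `ApiHandler::Async` handler
// keeps the signature shown above and returns a boxed future. The name
// `example_async_handler` and the hyper response construction are
// illustrative assumptions:
fn example_async_handler(
    _parts: Parts,
    _req_body: Body,
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {
    // Build a trivial response and wrap it in an immediately-ready future.
    let response = hyper::Response::builder()
        .status(200)
        .body(Body::from("ok"))?;
    Ok(Box::new(futures::future::ok(response)))
}
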
@ -21,9 +21,10 @@ fn get_subscription(
    }))
}

pub fn router() -> Router {
    Router::new()
        .get(ApiMethod::new(
            get_subscription,
            ObjectSchema::new("Read subscription info.")))
}
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_subscription),
            &ObjectSchema::new("Read subscription info.", &[])
        )
    );

@ -1,85 +1,97 @@
use failure::*;
use lazy_static::lazy_static;
use std::sync::Arc;
//use lazy_static::lazy_static;
//use std::sync::Arc;

use crate::api_schema::*;
use proxmox::tools::common_regex;
use proxmox::tools::*; // required to use IPRE!() macro ???

lazy_static!{
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
    if name.starts_with('.') {
        bail!("file names may not start with '.'");
    }
    if name.contains('/') {
        bail!("file names may not contain slashes");
    }
    Ok(())
});

    // File names: may not contain slashes, may not start with "."
    pub static ref FILENAME_FORMAT: Arc<ApiStringFormat> = Arc::new(ApiStringFormat::VerifyFn(|name| {
        if name.starts_with('.') {
            bail!("file names may not start with '.'");
        }
        if name.contains('/') {
            bail!("file names may not contain slashes");
        }
        Ok(())
    })).into();

    pub static ref IP_FORMAT: Arc<ApiStringFormat> = ApiStringFormat::Pattern(&common_regex::IP_REGEX).into();

    pub static ref PVE_CONFIG_DIGEST_FORMAT: Arc<ApiStringFormat> =
        ApiStringFormat::Pattern(&common_regex::SHA256_HEX_REGEX).into();

    pub static ref PVE_CONFIG_DIGEST_SCHEMA: Arc<Schema> =
        StringSchema::new("Prevent changes if current configuration file has different SHA256 digest. This can be used to prevent concurrent modifications.")
            .format(PVE_CONFIG_DIGEST_FORMAT.clone()).into();

    pub static ref CHUNK_DIGEST_FORMAT: Arc<ApiStringFormat> =
        ApiStringFormat::Pattern(&common_regex::SHA256_HEX_REGEX).into();

    pub static ref CHUNK_DIGEST_SCHEMA: Arc<Schema> =
        StringSchema::new("Chunk digest (SHA256).")
            .format(CHUNK_DIGEST_FORMAT.clone()).into();

    pub static ref NODE_SCHEMA: Arc<Schema> = Arc::new(
        StringSchema::new("Node name (or 'localhost')")
            .format(
                Arc::new(ApiStringFormat::VerifyFn(|node| {
                    if node == "localhost" || node == proxmox::tools::nodename() {
                        Ok(())
                    } else {
                        bail!("no such node '{}'", node);
                    }
                }))
            )
            .into()
    );

    pub static ref SEARCH_DOMAIN_SCHEMA: Arc<Schema> =
        StringSchema::new("Search domain for host-name lookup.").into();

    pub static ref FIRST_DNS_SERVER_SCHEMA: Arc<Schema> =
        StringSchema::new("First name server IP address.")
            .format(IP_FORMAT.clone()).into();

    pub static ref SECOND_DNS_SERVER_SCHEMA: Arc<Schema> =
        StringSchema::new("Second name server IP address.")
            .format(IP_FORMAT.clone()).into();

    pub static ref THIRD_DNS_SERVER_SCHEMA: Arc<Schema> =
        StringSchema::new("Third name server IP address.")
            .format(IP_FORMAT.clone()).into();

    pub static ref BACKUP_ARCHIVE_NAME_SCHEMA: Arc<Schema> =
        StringSchema::new("Backup archive name.")
            .format(FILENAME_FORMAT.clone()).into();

    pub static ref BACKUP_TYPE_SCHEMA: Arc<Schema> =
        StringSchema::new("Backup type.")
            .format(Arc::new(ApiStringFormat::Enum(&["vm", "ct", "host"])))
            .into();

    pub static ref BACKUP_ID_SCHEMA: Arc<Schema> =
        StringSchema::new("Backup ID.")
            .format(FILENAME_FORMAT.clone())
            .into();

    pub static ref BACKUP_TIME_SCHEMA: Arc<Schema> =
        IntegerSchema::new("Backup time (Unix epoch.)")
            .minimum(1_547_797_308)
            .into();

const_regex!{
    pub IP_FORMAT_REGEX = IPRE!();
    pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
    pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?
}

pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);

pub const IP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&IP_FORMAT_REGEX);

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const PVE_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(r#"\
Prevent changes if current configuration file has different SHA256 digest.
This can be used to prevent concurrent modifications.
"#
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();

pub const CHUNK_DIGEST_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
    .format(&CHUNK_DIGEST_FORMAT)
    .schema();

pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
    .format(&ApiStringFormat::VerifyFn(|node| {
        if node == "localhost" || node == proxmox::tools::nodename() {
            Ok(())
        } else {
            bail!("no such node '{}'", node);
        }
    }))
    .schema();

pub const SEARCH_DOMAIN_SCHEMA: Schema =
    StringSchema::new("Search domain for host-name lookup.").schema();

pub const FIRST_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("First name server IP address.")
        .format(&IP_FORMAT)
        .schema();

pub const SECOND_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("Second name server IP address.")
        .format(&IP_FORMAT)
        .schema();

pub const THIRD_DNS_SERVER_SCHEMA: Schema =
    StringSchema::new("Third name server IP address.")
        .format(&IP_FORMAT)
        .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
    StringSchema::new("Backup archive name.")
        .format(&FILENAME_FORMAT)
        .schema();

pub const BACKUP_TYPE_SCHEMA: Schema =
    StringSchema::new("Backup type.")
        .format(&ApiStringFormat::Enum(&["vm", "ct", "host"]))
        .schema();

pub const BACKUP_ID_SCHEMA: Schema =
    StringSchema::new("Backup ID.")
        .format(&FILENAME_FORMAT)
        .schema();

pub const BACKUP_TIME_SCHEMA: Schema =
    IntegerSchema::new("Backup time (Unix epoch.)")
        .minimum(1_547_797_308)
        .schema();

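// Hedged sketch (not part of this commit): the const chain above composes in
// three steps — a regex from `const_regex!`, an `ApiStringFormat::Pattern`
// over it, and a `StringSchema` turned into a plain `Schema` via `.schema()`.
// The `EXAMPLE_*` names are illustrative:
const_regex!{
    pub EXAMPLE_HEX_ID_REGEX = r"^[a-f0-9]{8}$";
}

pub const EXAMPLE_HEX_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&EXAMPLE_HEX_ID_REGEX);

pub const EXAMPLE_HEX_ID_SCHEMA: Schema =
    StringSchema::new("An eight character hexadecimal id.")
        .format(&EXAMPLE_HEX_ID_FORMAT)
        .schema();
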
@ -26,9 +26,11 @@ fn get_version(
    }))
}

pub fn router() -> Router {
    Router::new()
        .get(ApiMethod::new(
            get_version,
            ObjectSchema::new("Proxmox Backup Server API version.")))
}
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_version),
            &ObjectSchema::new("Proxmox Backup Server API version.", &[])
        )
    );