src/api2.rs: move backup api to /backup
@@ -20,7 +20,6 @@ use crate::server::WorkerTask;
mod pxar;
mod upload;
pub mod backup;

fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

@@ -393,10 +392,6 @@ pub fn router() -> Router {
            "test-upload",
            Router::new()
                .upgrade(upload::api_method_upgrade_upload()))
        .subdir(
            "backup",
            Router::new()
                .upgrade(backup::api_method_upgrade_backup()))
        .subdir(
            "gc",
            Router::new()

@@ -1,619 +0,0 @@
use failure::*;

use std::sync::Arc;

use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::{Body, Response, StatusCode};
use hyper::http::request::Parts;
use chrono::{Local, TimeZone};

use serde_json::{json, Value};

use crate::tools;
use crate::tools::wrapped_reader_stream::*;
use crate::api_schema::router::*;
use crate::api_schema::*;
use crate::server::WorkerTask;
use crate::backup::*;

mod environment;
use environment::*;

mod service;
use service::*;

mod upload_chunk;
use upload_chunk::*;


pub fn api_method_upgrade_backup() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upgrade_to_backup_protocol,
        ObjectSchema::new("Upgraded to backup protocol.")
            .required("store", StringSchema::new("Datastore name."))
            .required("backup-type", StringSchema::new("Backup type.")
                      .format(Arc::new(ApiStringFormat::Enum(&["vm", "ct", "host"]))))
            .required("backup-id", StringSchema::new("Backup ID."))
            .optional("debug", BooleanSchema::new("Enable verbose debug logging."))
    )
}

fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    static PROXMOX_BACKUP_PROTOCOL_ID: &str = "proxmox-backup-protocol-h2";

    let debug = param["debug"].as_bool().unwrap_or(false);

    let store = tools::required_string_param(&param, "store")?.to_owned();
    let datastore = DataStore::lookup_datastore(&store)?;

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = Local.timestamp(Local::now().timestamp(), 0);

    let protocols = parts
        .headers
        .get("UPGRADE")
        .ok_or_else(|| format_err!("missing Upgrade header"))?
        .to_str()?;

    if protocols != PROXMOX_BACKUP_PROTOCOL_ID {
        bail!("invalid protocol name");
    }

    if parts.version >= http::version::Version::HTTP_2 {
        bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
    }

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    let username = rpcenv.get_user().unwrap();
    let env_type = rpcenv.env_type();

    let backup_group = BackupGroup::new(backup_type, backup_id);
    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
    let backup_dir = BackupDir::new_with_group(backup_group, backup_time.timestamp());

    let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
    if !is_new { bail!("backup directory already exists."); }

    WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
        let mut env = BackupEnvironment::new(
            env_type, username.clone(), worker.clone(), datastore, backup_dir);

        env.debug = debug;
        env.last_backup = last_backup;

        env.log(format!("starting new backup on datastore '{}': {:?}", store, path));

        let service = BackupService::new(env.clone(), worker.clone(), debug);

        let abort_future = worker.abort_future();

        let env2 = env.clone();
        let env3 = env.clone();

        req_body
            .on_upgrade()
            .map_err(Error::from)
            .and_then(move |conn| {
                env3.debug("protocol upgrade done");

                let mut http = hyper::server::conn::Http::new();
                http.http2_only(true);
                // increase window size: todo - find optimal size
                let window_size = 32*1024*1024; // max = (1 << 31) - 2
                http.http2_initial_stream_window_size(window_size);
                http.http2_initial_connection_window_size(window_size);

                http.serve_connection(conn, service)
                    .map_err(Error::from)
            })
            .select(abort_future.map_err(|_| {}).then(move |_| { bail!("task aborted"); }))
            .map_err(|(err, _)| err)
            .and_then(move |(_result, _)| {
                env.ensure_finished()?;
                env.log("backup finished successfully");
                Ok(())
            })
            .then(move |result| {
                if let Err(err) = result {
                    match env2.ensure_finished() {
                        Ok(()) => {}, // ignore error after finish
                        _ => {
                            env2.log(format!("backup failed: {}", err));
                            env2.log("removing failed backup");
                            env2.remove_backup()?;
                            return Err(err);
                        }
                    }
                }
                Ok(())
            })
    })?;

    let response = Response::builder()
        .status(StatusCode::SWITCHING_PROTOCOLS)
        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID))
        .body(Body::empty())?;

    Ok(Box::new(futures::future::ok(response)))
}
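
The handler above only accepts a plain HTTP/1.x request carrying an Upgrade header and answers with 101 Switching Protocols before serving HTTP/2 on the raw connection. A rough sketch of the exchange a client performs (the path prefix and query encoding are assumptions, not part of this diff):

    GET /api2/json/backup?store=store1&backup-type=host&backup-id=host1 HTTP/1.1
    Upgrade: proxmox-backup-protocol-h2
    Connection: Upgrade

    HTTP/1.1 101 Switching Protocols
    Upgrade: proxmox-backup-protocol-h2

    ... the client now speaks HTTP/2 against backup_api() on this same connection ...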

pub fn backup_api() -> Router {

    let router = Router::new()
        .subdir(
            "config", Router::new()
                .upload(api_method_upload_config())
        )
        .subdir(
            "dynamic_chunk", Router::new()
                .upload(api_method_upload_dynamic_chunk())
        )
        .subdir(
            "dynamic_index", Router::new()
                .download(api_method_dynamic_chunk_index())
                .post(api_method_create_dynamic_index())
                .put(api_method_dynamic_append())
        )
        .subdir(
            "dynamic_close", Router::new()
                .post(api_method_close_dynamic_index())
        )
        .subdir(
            "fixed_chunk", Router::new()
                .upload(api_method_upload_fixed_chunk())
        )
        .subdir(
            "fixed_index", Router::new()
                .download(api_method_fixed_chunk_index())
                .post(api_method_create_fixed_index())
                .put(api_method_fixed_append())
        )
        .subdir(
            "fixed_close", Router::new()
                .post(api_method_close_fixed_index())
        )
        .subdir(
            "finish", Router::new()
                .post(
                    ApiMethod::new(
                        finish_backup,
                        ObjectSchema::new("Mark backup as finished.")
                    )
                )
        )
        .subdir(
            "speedtest", Router::new()
                .upload(api_method_upload_speedtest())
        )
        .list_subdirs();

    router
}

pub fn api_method_create_dynamic_index() -> ApiMethod {
    ApiMethod::new(
        create_dynamic_index,
        ObjectSchema::new("Create dynamic chunk index file.")
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

fn create_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();

    let mut archive_name = name.clone();
    if !archive_name.ends_with(".pxar") {
        bail!("wrong archive extension: '{}'", archive_name);
    } else {
        archive_name.push_str(".didx");
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let index = env.datastore.create_dynamic_writer(&path)?;
    let wid = env.register_dynamic_writer(index, name)?;

    env.log(format!("created new dynamic index {} ({:?})", wid, path));

    Ok(json!(wid))
}

pub fn api_method_create_fixed_index() -> ApiMethod {
    ApiMethod::new(
        create_fixed_index,
        ObjectSchema::new("Create fixed chunk index file.")
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
            .required("size", IntegerSchema::new("File size.")
                      .minimum(1)
            )
    )
}

fn create_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    println!("PARAM: {:?}", param);

    let name = tools::required_string_param(&param, "archive-name")?.to_owned();
    let size = tools::required_integer_param(&param, "size")? as usize;

    let mut archive_name = name.clone();
    if !archive_name.ends_with(".img") {
        bail!("wrong archive extension: '{}'", archive_name);
    } else {
        archive_name.push_str(".fidx");
    }

    let mut path = env.backup_dir.relative_path();
    path.push(archive_name);

    let chunk_size = 4096*1024; // todo: ??

    let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
    let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;

    env.log(format!("created new fixed index {} ({:?})", wid, path));

    Ok(json!(wid))
}

pub fn api_method_dynamic_append() -> ApiMethod {
    ApiMethod::new(
        dynamic_append,
        ObjectSchema::new("Append chunk to dynamic index writer.")
            .required("wid", IntegerSchema::new("Dynamic writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("digest-list", ArraySchema::new(
                "Chunk digest list.",
                StringSchema::new("Chunk digest.").into())
            )
            .required("offset-list", ArraySchema::new(
                "Chunk offset list.",
                IntegerSchema::new("Corresponding chunk offsets.")
                    .minimum(0)
                    .into())
            )
    )
}

fn dynamic_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("dynamic_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = crate::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

pub fn api_method_fixed_append() -> ApiMethod {
    ApiMethod::new(
        fixed_append,
        ObjectSchema::new("Append chunk to fixed index writer.")
            .required("wid", IntegerSchema::new("Fixed writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("digest-list", ArraySchema::new(
                "Chunk digest list.",
                StringSchema::new("Chunk digest.").into())
            )
            .required("offset-list", ArraySchema::new(
                "Chunk offset list.",
                IntegerSchema::new("Corresponding chunk offsets.")
                    .minimum(0)
                    .into())
            )
    )
}

fn fixed_append (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let digest_list = tools::required_array_param(&param, "digest-list")?;
    let offset_list = tools::required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.debug(format!("fixed_append {} chunks", digest_list.len()));

    for (i, item) in digest_list.iter().enumerate() {
        let digest_str = item.as_str().unwrap();
        let digest = crate::tools::hex_to_digest(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
    }

    Ok(Value::Null)
}

pub fn api_method_close_dynamic_index() -> ApiMethod {
    ApiMethod::new(
        close_dynamic_index,
        ObjectSchema::new("Close dynamic index writer.")
            .required("wid", IntegerSchema::new("Dynamic writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                      .minimum(1)
            )
            .required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
                      .minimum(1)
            )
    )
}

fn close_dynamic_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.dynamic_writer_close(wid, chunk_count, size)?;

    env.log(format!("successfully closed dynamic index {}", wid));

    Ok(Value::Null)
}

pub fn api_method_close_fixed_index() -> ApiMethod {
    ApiMethod::new(
        close_fixed_index,
        ObjectSchema::new("Close fixed index writer.")
            .required("wid", IntegerSchema::new("Fixed writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("chunk-count", IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
                      .minimum(1)
            )
            .required("size", IntegerSchema::new("File size. This is used to verify that the server got all data.")
                      .minimum(1)
            )
    )
}

fn close_fixed_index (
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let chunk_count = tools::required_integer_param(&param, "chunk-count")? as u64;
    let size = tools::required_integer_param(&param, "size")? as u64;

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.fixed_writer_close(wid, chunk_count, size)?;

    env.log(format!("successfully closed fixed index {}", wid));

    Ok(Value::Null)
}

fn finish_backup (
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
    env.log("successfully finished backup");

    Ok(Value::Null)
}
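
Taken together, the index endpoints above define the per-archive upload cycle a client is expected to follow; a rough outline (paths relative to the upgraded backup connection, names as wired up in backup_api()):

    POST dynamic_index        -> returns writer id (wid)
    UPLOAD dynamic_chunk      -> returns the chunk digest (repeat per chunk)
    PUT  dynamic_index        -> digest-list / offset-list for that wid
    POST dynamic_close        -> wid, chunk-count, size (consistency check)
    POST finish               -> mark the whole backup as finished

The fixed_* endpoints follow the same pattern for fixed-size chunk archives.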

pub fn api_method_dynamic_chunk_index() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        dynamic_chunk_index,
        ObjectSchema::new(r###"
Download the dynamic chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
        )
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

fn dynamic_chunk_index(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let mut archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

    if !archive_name.ends_with(".pxar") {
        bail!("wrong archive extension: '{}'", archive_name);
    } else {
        archive_name.push_str(".didx");
    }

    let empty_response = {
        Response::builder()
            .status(StatusCode::OK)
            .body(Body::empty())?
    };

    let last_backup = match &env.last_backup {
        Some(info) => info,
        None => return Ok(Box::new(future::ok(empty_response))),
    };

    let mut path = last_backup.backup_dir.relative_path();
    path.push(&archive_name);

    let index = match env.datastore.open_dynamic_reader(path) {
        Ok(index) => index,
        Err(_) => {
            env.log(format!("there is no last backup for archive '{}'", archive_name));
            return Ok(Box::new(future::ok(empty_response)));
        }
    };

    env.log(format!("download last backup index for archive '{}'", archive_name));

    let count = index.index_count();
    for pos in 0..count {
        let (start, end, digest) = index.chunk_info(pos)?;
        let size = (end - start) as u32;
        env.register_chunk(digest, size)?;
    }

    let reader = DigestListEncoder::new(Box::new(index));

    let stream = WrappedReaderStream::new(reader);

    // fixme: set size, content type?
    let response = http::Response::builder()
        .status(200)
        .body(Body::wrap_stream(stream))?;

    Ok(Box::new(future::ok(response)))
}

pub fn api_method_fixed_chunk_index() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        fixed_chunk_index,
        ObjectSchema::new(r###"
Download the fixed chunk index from the previous backup.
Simply returns an empty list if this is the first backup.
"###
        )
            .required("archive-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
    )
}

fn fixed_chunk_index(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let env: &BackupEnvironment = rpcenv.as_ref();

    let mut archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

    if !archive_name.ends_with(".img") {
        bail!("wrong archive extension: '{}'", archive_name);
    } else {
        archive_name.push_str(".fidx");
    }

    let empty_response = {
        Response::builder()
            .status(StatusCode::OK)
            .body(Body::empty())?
    };

    let last_backup = match &env.last_backup {
        Some(info) => info,
        None => return Ok(Box::new(future::ok(empty_response))),
    };

    let mut path = last_backup.backup_dir.relative_path();
    path.push(&archive_name);

    let index = match env.datastore.open_fixed_reader(path) {
        Ok(index) => index,
        Err(_) => {
            env.log(format!("there is no last backup for archive '{}'", archive_name));
            return Ok(Box::new(future::ok(empty_response)));
        }
    };

    env.log(format!("download last backup index for archive '{}'", archive_name));

    let count = index.index_count();
    for pos in 0..count {
        let digest = index.index_digest(pos).unwrap();
        let size = index.chunk_size as u32;
        env.register_chunk(*digest, size)?;
    }

    let reader = DigestListEncoder::new(Box::new(index));

    let stream = WrappedReaderStream::new(reader);

    // fixme: set size, content type?
    let response = http::Response::builder()
        .status(200)
        .body(Body::wrap_stream(stream))?;

    Ok(Box::new(future::ok(response)))
}
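
The register_chunk() loop in both download handlers is what enables incremental backups: every chunk of the previous snapshot becomes "known" on the server, so a client may reference those digests later without uploading the data again. A minimal client-side sketch of that decision, assuming a hypothetical `known` digest set built from the downloaded list (names here are illustrative, not part of this diff):

    // hypothetical client-side sketch, not part of this commit
    for chunk in chunks {
        let digest = sha256(&chunk.data);            // assumed digest helper
        if !known.contains(&digest) {
            upload_chunk(&chunk)?;                   // UPLOAD .../dynamic_chunk
        }
        digest_list.push(digest);                    // referenced either way
        offset_list.push(chunk.offset);              // sent with the PUT on dynamic_index
    }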
@@ -1,452 +0,0 @@
use failure::*;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;

use serde_json::Value;

use crate::api_schema::router::{RpcEnvironment, RpcEnvironmentType};
use crate::server::WorkerTask;
use crate::backup::*;
use crate::server::formatter::*;
use hyper::{Body, Response};

struct UploadStatistic {
    count: u64,
    size: u64,
    compressed_size: u64,
    duplicates: u64,
}

impl UploadStatistic {
    fn new() -> Self {
        Self {
            count: 0,
            size: 0,
            compressed_size: 0,
            duplicates: 0,
        }
    }
}

struct DynamicWriterState {
    name: String,
    index: DynamicIndexWriter,
    offset: u64,
    chunk_count: u64,
    upload_stat: UploadStatistic,
}

struct FixedWriterState {
    name: String,
    index: FixedIndexWriter,
    size: usize,
    chunk_size: u32,
    chunk_count: u64,
    upload_stat: UploadStatistic,
}

struct SharedBackupState {
    finished: bool,
    uid_counter: usize,
    file_counter: usize, // successfully uploaded files
    dynamic_writers: HashMap<usize, DynamicWriterState>,
    fixed_writers: HashMap<usize, FixedWriterState>,
    known_chunks: HashMap<[u8;32], u32>,
}

impl SharedBackupState {

    // Raise error if finished flag is set
    fn ensure_unfinished(&self) -> Result<(), Error> {
        if self.finished {
            bail!("backup already marked as finished.");
        }
        Ok(())
    }

    // Get a unique integer ID
    pub fn next_uid(&mut self) -> usize {
        self.uid_counter += 1;
        self.uid_counter
    }
}

/// `RpcEnvironment` implementation for the backup service
#[derive(Clone)]
pub struct BackupEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: HashMap<String, Value>,
    user: String,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
    pub worker: Arc<WorkerTask>,
    pub datastore: Arc<DataStore>,
    pub backup_dir: BackupDir,
    pub last_backup: Option<BackupInfo>,
    state: Arc<Mutex<SharedBackupState>>
}

impl BackupEnvironment {
    pub fn new(
        env_type: RpcEnvironmentType,
        user: String,
        worker: Arc<WorkerTask>,
        datastore: Arc<DataStore>,
        backup_dir: BackupDir,
    ) -> Self {

        let state = SharedBackupState {
            finished: false,
            uid_counter: 0,
            file_counter: 0,
            dynamic_writers: HashMap::new(),
            fixed_writers: HashMap::new(),
            known_chunks: HashMap::new(),
        };

        Self {
            result_attributes: HashMap::new(),
            env_type,
            user,
            worker,
            datastore,
            debug: false,
            formatter: &JSON_FORMATTER,
            backup_dir,
            last_backup: None,
            state: Arc::new(Mutex::new(state)),
        }
    }

    /// Register a Chunk with associated length.
    ///
    /// We do not fully trust clients, so a client may only use registered
    /// chunks. Please use this method to register chunks from previous backups.
    pub fn register_chunk(&self, digest: [u8; 32], length: u32) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        state.known_chunks.insert(digest, length);

        Ok(())
    }

    /// Register fixed length chunks after upload.
    ///
    /// Like `register_chunk()`, but additionally record statistics for
    /// the fixed index writer.
    pub fn register_fixed_chunk(
        &self,
        wid: usize,
        digest: [u8; 32],
        size: u32,
        compressed_size: u32,
        is_duplicate: bool,
    ) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        if size != data.chunk_size {
            bail!("fixed writer '{}' - got unexpected chunk size ({} != {})", data.name, size, data.chunk_size);
        }

        // record statistics
        data.upload_stat.count += 1;
        data.upload_stat.size += size as u64;
        data.upload_stat.compressed_size += compressed_size as u64;
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
        state.known_chunks.insert(digest, size);

        Ok(())
    }

    /// Register dynamic length chunks after upload.
    ///
    /// Like `register_chunk()`, but additionally record statistics for
    /// the dynamic index writer.
    pub fn register_dynamic_chunk(
        &self,
        wid: usize,
        digest: [u8; 32],
        size: u32,
        compressed_size: u32,
        is_duplicate: bool,
    ) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        // record statistics
        data.upload_stat.count += 1;
        data.upload_stat.size += size as u64;
        data.upload_stat.compressed_size += compressed_size as u64;
        if is_duplicate { data.upload_stat.duplicates += 1; }

        // register chunk
        state.known_chunks.insert(digest, size);

        Ok(())
    }

    pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option<u32> {
        let state = self.state.lock().unwrap();

        match state.known_chunks.get(digest) {
            Some(len) => Some(*len),
            None => None,
        }
    }

    /// Store the writer with a unique ID
    pub fn register_dynamic_writer(&self, index: DynamicIndexWriter, name: String) -> Result<usize, Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let uid = state.next_uid();

        state.dynamic_writers.insert(uid, DynamicWriterState {
            index, name, offset: 0, chunk_count: 0, upload_stat: UploadStatistic::new(),
        });

        Ok(uid)
    }

    /// Store the writer with a unique ID
    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let uid = state.next_uid();

        state.fixed_writers.insert(uid, FixedWriterState {
            index, name, chunk_count: 0, size, chunk_size, upload_stat: UploadStatistic::new(),
        });

        Ok(uid)
    }

    /// Append chunk to dynamic writer
    pub fn dynamic_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        if data.offset != offset {
            bail!("dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
                  data.name, data.offset, offset);
        }

        data.offset += size as u64;
        data.chunk_count += 1;

        data.index.add_chunk(data.offset, digest)?;

        Ok(())
    }
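
    // A quick worked example of the offset check above: if a client appends three
    // chunks of 100, 250 and 80 bytes, the offsets it sends must be 0, 100 and 350
    // (each chunk starts where the previous one ended). After the third append
    // data.offset is 430, which is exactly the file size the later
    // dynamic_writer_close() call has to report.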

    /// Append chunk to fixed writer
    pub fn fixed_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.get_mut(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        data.chunk_count += 1;

        if size != data.chunk_size {
            bail!("fixed writer '{}' - got unexpected chunk size ({} != {})", data.name, size, data.chunk_size);
        }

        let pos = (offset as usize)/(data.chunk_size as usize);
        data.index.add_digest(pos, digest)?;

        Ok(())
    }

    fn log_upload_stat(&self, archive_name: &str, size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
        self.log(format!("Upload statistics for '{}'", archive_name));
        self.log(format!("Size: {}", size));
        self.log(format!("Chunk count: {}", chunk_count));
        self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
        if upload_stat.size > 0 {
            self.log(format!("Compression: {}%", (upload_stat.compressed_size*100)/upload_stat.size));
        }
    }

    /// Close dynamic writer
    pub fn dynamic_writer_close(&self, wid: usize, chunk_count: u64, size: u64) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.dynamic_writers.remove(&wid) {
            Some(data) => data,
            None => bail!("dynamic writer '{}' not registered", wid),
        };

        if data.chunk_count != chunk_count {
            bail!("dynamic writer '{}' close failed - unexpected chunk count ({} != {})", data.name, data.chunk_count, chunk_count);
        }

        if data.offset != size {
            bail!("dynamic writer '{}' close failed - unexpected file size ({} != {})", data.name, data.offset, size);
        }

        data.index.close()?;

        self.log_upload_stat(&data.name, size, chunk_count, &data.upload_stat);

        state.file_counter += 1;

        Ok(())
    }

    /// Close fixed writer
    pub fn fixed_writer_close(&self, wid: usize, chunk_count: u64, size: u64) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();

        state.ensure_unfinished()?;

        let mut data = match state.fixed_writers.remove(&wid) {
            Some(data) => data,
            None => bail!("fixed writer '{}' not registered", wid),
        };

        if data.chunk_count != chunk_count {
            bail!("fixed writer '{}' close failed - received wrong number of chunks ({} != {})", data.name, data.chunk_count, chunk_count);
        }

        let expected_count = data.index.index_length();

        if chunk_count != (expected_count as u64) {
            bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
        }

        if size != (data.size as u64) {
            bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
        }

        data.index.close()?;

        self.log_upload_stat(&data.name, size, chunk_count, &data.upload_stat);

        state.file_counter += 1;

        Ok(())
    }

    /// Mark backup as finished
    pub fn finish_backup(&self) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();
        // test if all writers are correctly closed

        state.ensure_unfinished()?;

        state.finished = true;

        if state.dynamic_writers.len() != 0 {
            bail!("found open index writer - unable to finish backup");
        }

        if state.file_counter == 0 {
            bail!("backup does not contain valid files (file count == 0)");
        }

        Ok(())
    }

    pub fn log<S: AsRef<str>>(&self, msg: S) {
        self.worker.log(msg);
    }

    pub fn debug<S: AsRef<str>>(&self, msg: S) {
        if self.debug { self.worker.log(msg); }
    }

    pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {
        match result {
            Ok(data) => (self.formatter.format_data)(data, self),
            Err(err) => (self.formatter.format_error)(err),
        }
    }

    /// Raise error if finished flag is not set
    pub fn ensure_finished(&self) -> Result<(), Error> {
        let state = self.state.lock().unwrap();
        if !state.finished {
            bail!("backup ended but finished flag is not set.");
        }
        Ok(())
    }

    /// Remove complete backup
    pub fn remove_backup(&self) -> Result<(), Error> {
        let mut state = self.state.lock().unwrap();
        state.finished = true;

        self.datastore.remove_backup_dir(&self.backup_dir)?;

        Ok(())
    }
}

impl RpcEnvironment for BackupEnvironment {

    fn set_result_attrib(&mut self, name: &str, value: Value) {
        self.result_attributes.insert(name.into(), value);
    }

    fn get_result_attrib(&self, name: &str) -> Option<&Value> {
        self.result_attributes.get(name)
    }

    fn env_type(&self) -> RpcEnvironmentType {
        self.env_type
    }

    fn set_user(&mut self, _user: Option<String>) {
        panic!("unable to change user");
    }

    fn get_user(&self) -> Option<String> {
        Some(self.user.clone())
    }
}

impl AsRef<BackupEnvironment> for RpcEnvironment {
    fn as_ref(&self) -> &BackupEnvironment {
        self.as_any().downcast_ref::<BackupEnvironment>().unwrap()
    }
}

impl AsRef<BackupEnvironment> for Box<RpcEnvironment> {
    fn as_ref(&self) -> &BackupEnvironment {
        self.as_any().downcast_ref::<BackupEnvironment>().unwrap()
    }
}
@@ -1,120 +0,0 @@
use failure::*;
use lazy_static::lazy_static;

use std::collections::HashMap;
use std::sync::Arc;

use futures::*;
use hyper::{Body, Request, Response, StatusCode};

use crate::tools;
use crate::api_schema::router::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;

use super::environment::*;

lazy_static!{
    static ref BACKUP_ROUTER: Router = super::backup_api();
}

pub struct BackupService {
    rpcenv: BackupEnvironment,
    worker: Arc<WorkerTask>,
    debug: bool,
}

impl BackupService {

    pub fn new(rpcenv: BackupEnvironment, worker: Arc<WorkerTask>, debug: bool) -> Self {
        Self { rpcenv, worker, debug }
    }

    pub fn debug<S: AsRef<str>>(&self, msg: S) {
        if self.debug { self.worker.log(msg); }
    }

    fn handle_request(&self, req: Request<Body>) -> BoxFut {

        let (parts, body) = req.into_parts();

        let method = parts.method.clone();

        let (path, components) = match tools::normalize_uri_path(parts.uri.path()) {
            Ok((p,c)) => (p, c),
            Err(err) => return Box::new(future::err(http_err!(BAD_REQUEST, err.to_string()))),
        };

        self.debug(format!("{} {}", method, path));

        let mut uri_param = HashMap::new();

        match BACKUP_ROUTER.find_method(&components, method, &mut uri_param) {
            MethodDefinition::None => {
                let err = http_err!(NOT_FOUND, "Path not found.".to_string());
                return Box::new(future::ok((self.rpcenv.formatter.format_error)(err)));
            }
            MethodDefinition::Simple(api_method) => {
                return crate::server::rest::handle_sync_api_request(
                    self.rpcenv.clone(), api_method, self.rpcenv.formatter, parts, body, uri_param);
            }
            MethodDefinition::Async(async_method) => {
                return crate::server::rest::handle_async_api_request(
                    self.rpcenv.clone(), async_method, self.rpcenv.formatter, parts, body, uri_param);
            }
        }
    }

    fn log_response(worker: Arc<WorkerTask>, method: hyper::Method, path: &str, resp: &Response<Body>) {

        let status = resp.status();

        if !status.is_success() {
            let reason = status.canonical_reason().unwrap_or("unknown reason");

            let mut message = "request failed";
            if let Some(data) = resp.extensions().get::<ErrorMessageExtension>() {
                message = &data.0;
            }

            worker.log(format!("{} {}: {} {}: {}", method.as_str(), path, status.as_str(), reason, message));
        }
    }
}

impl hyper::service::Service for BackupService {
    type ReqBody = Body;
    type ResBody = Body;
    type Error = Error;
    type Future = Box<Future<Item = Response<Body>, Error = Self::Error> + Send>;

    fn call(&mut self, req: Request<Self::ReqBody>) -> Self::Future {
        let path = req.uri().path().to_owned();
        let method = req.method().clone();
        let worker = self.worker.clone();

        Box::new(self.handle_request(req).then(move |result| {
            match result {
                Ok(res) => {
                    Self::log_response(worker, method, &path, &res);
                    Ok::<_, Error>(res)
                }
                Err(err) => {
                    if let Some(apierr) = err.downcast_ref::<HttpError>() {
                        let mut resp = Response::new(Body::from(apierr.message.clone()));
                        resp.extensions_mut().insert(ErrorMessageExtension(apierr.message.clone()));
                        *resp.status_mut() = apierr.code;
                        Self::log_response(worker, method, &path, &resp);
                        Ok(resp)
                    } else {
                        let mut resp = Response::new(Body::from(err.to_string()));
                        resp.extensions_mut().insert(ErrorMessageExtension(err.to_string()));
                        *resp.status_mut() = StatusCode::BAD_REQUEST;
                        Self::log_response(worker, method, &path, &resp);
                        Ok(resp)
                    }
                }
            }
        }))
    }
}
@@ -1,249 +0,0 @@
use failure::*;
use futures::*;
use std::sync::Arc;

use hyper::http::request::Parts;
use hyper::Body;
use serde_json::{json, Value};

use crate::tools;
use crate::backup::*;
use crate::api_schema::*;
use crate::api_schema::router::*;

use super::environment::*;

pub struct UploadChunk {
    stream: Body,
    store: Arc<DataStore>,
    size: u32,
    chunk: Vec<u8>,
}

impl UploadChunk {

    pub fn new(stream: Body, store: Arc<DataStore>, size: u32) -> Self {
        Self { stream, store, size, chunk: vec![] }
    }
}

impl Future for UploadChunk {
    type Item = ([u8; 32], u32, u32, bool);
    type Error = failure::Error;

    fn poll(&mut self) -> Poll<([u8; 32], u32, u32, bool), failure::Error> {
        loop {
            match try_ready!(self.stream.poll()) {
                Some(chunk) => {
                    if (self.chunk.len() + chunk.len()) > (self.size as usize) {
                        bail!("uploaded chunk is larger than announced.");
                    }
                    self.chunk.extend_from_slice(&chunk);
                }
                None => {
                    if self.chunk.len() != (self.size as usize) {
                        bail!("uploaded chunk has unexpected size.");
                    }

                    let (is_duplicate, digest, compressed_size) = self.store.insert_chunk(&self.chunk)?;

                    return Ok(Async::Ready((digest, self.size, compressed_size as u32, is_duplicate)))
                }
            }
        }
    }
}

pub fn api_method_upload_fixed_chunk() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upload_fixed_chunk,
        ObjectSchema::new("Upload a new chunk.")
            .required("wid", IntegerSchema::new("Fixed writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("size", IntegerSchema::new("Chunk size.")
                      .minimum(1)
                      .maximum(1024*1024*16)
            )
    )
}

fn upload_fixed_chunk(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let size = tools::required_integer_param(&param, "size")? as u32;

    let env: &BackupEnvironment = rpcenv.as_ref();

    let upload = UploadChunk::new(req_body, env.datastore.clone(), size);

    let resp = upload
        .then(move |result| {
            let env: &BackupEnvironment = rpcenv.as_ref();

            let result = result.and_then(|(digest, size, compressed_size, is_duplicate)| {
                env.register_fixed_chunk(wid, digest, size, compressed_size, is_duplicate)?;
                let digest_str = tools::digest_to_hex(&digest);
                env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
                Ok(json!(digest_str))
            });

            Ok(env.format_response(result))
        });

    Ok(Box::new(resp))
}

pub fn api_method_upload_dynamic_chunk() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upload_dynamic_chunk,
        ObjectSchema::new("Upload a new chunk.")
            .required("wid", IntegerSchema::new("Dynamic writer ID.")
                      .minimum(1)
                      .maximum(256)
            )
            .required("size", IntegerSchema::new("Chunk size.")
                      .minimum(1)
                      .maximum(1024*1024*16)
            )
    )
}

fn upload_dynamic_chunk(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let wid = tools::required_integer_param(&param, "wid")? as usize;
    let size = tools::required_integer_param(&param, "size")? as u32;

    let env: &BackupEnvironment = rpcenv.as_ref();

    let upload = UploadChunk::new(req_body, env.datastore.clone(), size);

    let resp = upload
        .then(move |result| {
            let env: &BackupEnvironment = rpcenv.as_ref();

            let result = result.and_then(|(digest, size, compressed_size, is_duplicate)| {
                env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
                let digest_str = tools::digest_to_hex(&digest);
                env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
                Ok(json!(digest_str))
            });

            Ok(env.format_response(result))
        });

    Ok(Box::new(resp))
}

pub fn api_method_upload_speedtest() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upload_speedtest,
        ObjectSchema::new("Test upload speed.")
    )
}

fn upload_speedtest(
    _parts: Parts,
    req_body: Body,
    _param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let resp = req_body
        .map_err(Error::from)
        .fold(0, |size: usize, chunk| -> Result<usize, Error> {
            let sum = size + chunk.len();
            //println!("UPLOAD {} bytes, sum {}", chunk.len(), sum);
            Ok(sum)
        })
        .then(move |result| {
            match result {
                Ok(size) => {
                    println!("UPLOAD END {} bytes", size);
                }
                Err(err) => {
                    println!("Upload error: {}", err);
                }
            }
            let env: &BackupEnvironment = rpcenv.as_ref();
            Ok(env.format_response(Ok(Value::Null)))
        });

    Ok(Box::new(resp))
}

pub fn api_method_upload_config() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upload_config,
        ObjectSchema::new("Upload configuration file.")
            .required("file-name", crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA.clone())
            .required("size", IntegerSchema::new("File size.")
                      .minimum(1)
                      .maximum(1024*1024*16)
            )
    )
}

fn upload_config(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    rpcenv: Box<RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let mut file_name = tools::required_string_param(&param, "file-name")?.to_owned();
    let size = tools::required_integer_param(&param, "size")? as usize;

    if !file_name.ends_with(".conf") {
        bail!("wrong config file extension: '{}'", file_name);
    } else {
        file_name.push_str(".zstd");
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

    let mut path = env.datastore.base_path();
    path.push(env.backup_dir.relative_path());
    path.push(&file_name);

    let env2 = env.clone();
    let env3 = env.clone();

    let resp = req_body
        .map_err(Error::from)
        .concat2()
        .and_then(move |data| {
            if size != data.len() {
                bail!("got configuration file with unexpected length ({} != {})", size, data.len());
            }

            let data = zstd::block::compress(&data, 0)?;

            tools::file_set_contents(&path, &data, None)?;

            env2.debug(format!("upload config {:?} ({} bytes, comp: {})", path, size, data.len()));

            Ok(())
        })
        .and_then(move |_| {
            Ok(env3.format_response(Ok(Value::Null)))
        })
        ;

    Ok(Box::new(resp))
}
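
One detail worth spelling out for upload_config: the announced size refers to the uncompressed payload, which is zstd-compressed on the server side and stored under the original name plus a ".zstd" suffix. For example (the file name is illustrative only), uploading qemu-server.conf with size=1234 stores qemu-server.conf.zstd inside the snapshot directory, and the request is rejected if the received body is not exactly 1234 bytes.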