src/server/rest.rs: cleanup async code

Author: Dietmar Maurer
Date:   2019-11-22 13:02:05 +01:00
parent: be2bb37205
commit: ad51d02aa9

6 changed files with 451 additions and 502 deletions
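The change applies one pattern throughout: handlers that built boxed combinator chains and returned Result<ApiFuture, Error> now return ApiFuture directly, with the whole body inside an async block that is boxed at the end, so `?` and bail!() work everywhere. A minimal sketch of the pattern, assuming ApiFuture is roughly a boxed future over a fallible response; the type alias and handler below are illustrative, not the exact proxmox-backup definitions:

    use failure::{bail, Error};
    use futures::future::{BoxFuture, FutureExt}; // FutureExt provides .boxed()

    type ApiFuture = BoxFuture<'static, Result<String, Error>>;

    fn handler(param: Option<String>) -> ApiFuture {
        // Inside async move, `?` and bail!() return through the block's
        // Result; .boxed() erases the concrete future type behind the alias.
        async move {
            let name = match param {
                Some(name) => name,
                None => bail!("missing parameter"),
            };
            Ok(format!("hello {}", name))
        }
        .boxed()
    }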


@@ -480,43 +480,43 @@ fn download_file(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let store = tools::required_string_param(&param, "store")?;
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(store)?;

-    let datastore = DataStore::lookup_datastore(store)?;
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

-    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_type = tools::required_string_param(&param, "backup-type")?;
-    let backup_id = tools::required_string_param(&param, "backup-id")?;
-    let backup_time = tools::required_integer_param(&param, "backup-time")?;
+        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
+                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

-    println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
-             backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(&file_name);

-    let mut path = datastore.base_path();
-    path.push(backup_dir.relative_path());
-    path.push(&file_name);
+        let file = tokio::fs::File::open(path)
+            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
+            .await?;

-    let response_future = tokio::fs::File::open(path)
-        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
-        .and_then(move |file| {
-            let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
-                .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
-            let body = Body::wrap_stream(payload);
+        let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
+            .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
+        let body = Body::wrap_stream(payload);

-            // fixme: set other headers ?
-            futures::future::ok(Response::builder()
-               .status(StatusCode::OK)
-               .header(header::CONTENT_TYPE, "application/octet-stream")
-               .body(body)
-               .unwrap())
-        });
-
-    Ok(Box::new(response_future))
+        // fixme: set other headers ?
+        Ok(Response::builder()
+           .status(StatusCode::OK)
+           .header(header::CONTENT_TYPE, "application/octet-stream")
+           .body(body)
+           .unwrap())
+    }.boxed()
}
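Where the old chain adapted the error type before the first combinator, the new form keeps the map_err but finishes with a single .await?. A small sketch of that sub-pattern, assuming the futures 0.3 TryFutureExt trait and a std-futures tokio; the function and its path handling are illustrative:

    use failure::{format_err, Error};
    use futures::TryFutureExt;

    async fn open_checked(path: std::path::PathBuf) -> Result<tokio::fs::File, Error> {
        // map_err rewrites the error type on the still-pending future,
        // so `.await?` propagates the adapted error with no and_then().
        let file = tokio::fs::File::open(path)
            .map_err(|err| format_err!("File open failed: {}", err))
            .await?;
        Ok(file)
    }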
#[sortable]
@@ -543,51 +543,49 @@ fn upload_backup_log(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let store = tools::required_string_param(&param, "store")?;
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(store)?;

-    let datastore = DataStore::lookup_datastore(store)?;
+        let file_name = "client.log.blob";

-    let file_name = "client.log.blob";
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_type = tools::required_string_param(&param, "backup-type")?;
-    let backup_id = tools::required_string_param(&param, "backup-id")?;
-    let backup_time = tools::required_integer_param(&param, "backup-time")?;
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(&file_name);

-    let mut path = datastore.base_path();
-    path.push(backup_dir.relative_path());
-    path.push(&file_name);
+        if path.exists() {
+            bail!("backup already contains a log.");
+        }

-    if path.exists() {
-        bail!("backup already contains a log.");
-    }
+        println!("Upload backup log to {}/{}/{}/{}/{}", store,
+                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

-    println!("Upload backup log to {}/{}/{}/{}/{}", store,
-             backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
+        let data = req_body
+            .map_err(Error::from)
+            .try_fold(Vec::new(), |mut acc, chunk| {
+                acc.extend_from_slice(&*chunk);
+                future::ok::<_, Error>(acc)
+            })
+            .await?;

-    let resp = req_body
-        .map_err(Error::from)
-        .try_fold(Vec::new(), |mut acc, chunk| {
-            acc.extend_from_slice(&*chunk);
-            future::ok::<_, Error>(acc)
-        })
-        .and_then(move |data| async move {
-            let blob = DataBlob::from_raw(data)?;
-            // always verify CRC at server side
-            blob.verify_crc()?;
-            let raw_data = blob.raw_data();
-            file_set_contents(&path, raw_data, None)?;
-            Ok(())
-        })
-        .and_then(move |_| {
-            future::ok(crate::server::formatter::json_response(Ok(Value::Null)))
-        })
-        ;
+        let blob = DataBlob::from_raw(data)?;
+        // always verify CRC at server side
+        blob.verify_crc()?;
+        let raw_data = blob.raw_data();
+        file_set_contents(&path, raw_data, None)?;

-    Ok(Box::new(resp))
+        // fixme: use correct formatter
+        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
+    }.boxed()
}
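The request body is still collected with try_fold; only the trailing combinators collapse into one .await?. A sketch with a stand-in stream, assuming futures 0.3; req_body below is a local stand-in for the hyper request body:

    use failure::Error;
    use futures::{future, stream, TryStreamExt};

    async fn collect_body() -> Result<Vec<u8>, Error> {
        // Stand-in for req_body: a fallible stream of byte chunks.
        let req_body = stream::iter(vec![
            Ok::<_, Error>(&b"hello "[..]),
            Ok(&b"world"[..]),
        ]);

        // Accumulate every chunk into one Vec; with async/await the
        // whole fold resolves through a single `.await?`.
        let data = req_body
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        Ok(data)
    }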
#[sortable]
@@ -698,7 +696,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    ),
];

-const DATASTORE_INFO_ROUTER: Router = Router::new()
+const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


@@ -47,8 +47,9 @@ fn upgrade_to_backup_protocol(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {
+    async move {
    let debug = param["debug"].as_bool().unwrap_or(false);
    let store = tools::required_string_param(&param, "store")?.to_owned();
@@ -159,7 +160,8 @@ fn upgrade_to_backup_protocol(
        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
        .body(Body::empty())?;

-    Ok(Box::new(futures::future::ok(response)))
+    Ok(response)
+    }.boxed()
}
pub const BACKUP_API_SUBDIRS: SubdirMap = &[
@@ -569,57 +571,59 @@ fn dynamic_chunk_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let env: &BackupEnvironment = rpcenv.as_ref();
+    async move {
+        let env: &BackupEnvironment = rpcenv.as_ref();

-    let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
+        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

-    if !archive_name.ends_with(".didx") {
-        bail!("wrong archive extension: '{}'", archive_name);
-    }
+        if !archive_name.ends_with(".didx") {
+            bail!("wrong archive extension: '{}'", archive_name);
+        }

-    let empty_response = {
-        Response::builder()
-            .status(StatusCode::OK)
-            .body(Body::empty())?
-    };
+        let empty_response = {
+            Response::builder()
+                .status(StatusCode::OK)
+                .body(Body::empty())?
+        };

-    let last_backup = match &env.last_backup {
-        Some(info) => info,
-        None => return Ok(Box::new(future::ok(empty_response))),
-    };
+        let last_backup = match &env.last_backup {
+            Some(info) => info,
+            None => return Ok(empty_response),
+        };

-    let mut path = last_backup.backup_dir.relative_path();
-    path.push(&archive_name);
+        let mut path = last_backup.backup_dir.relative_path();
+        path.push(&archive_name);

-    let index = match env.datastore.open_dynamic_reader(path) {
-        Ok(index) => index,
-        Err(_) => {
-            env.log(format!("there is no last backup for archive '{}'", archive_name));
-            return Ok(Box::new(future::ok(empty_response)));
-        }
-    };
+        let index = match env.datastore.open_dynamic_reader(path) {
+            Ok(index) => index,
+            Err(_) => {
+                env.log(format!("there is no last backup for archive '{}'", archive_name));
+                return Ok(empty_response);
+            }
+        };

-    env.log(format!("download last backup index for archive '{}'", archive_name));
+        env.log(format!("download last backup index for archive '{}'", archive_name));

-    let count = index.index_count();
-    for pos in 0..count {
-        let (start, end, digest) = index.chunk_info(pos)?;
-        let size = (end - start) as u32;
-        env.register_chunk(digest, size)?;
-    }
+        let count = index.index_count();
+        for pos in 0..count {
+            let (start, end, digest) = index.chunk_info(pos)?;
+            let size = (end - start) as u32;
+            env.register_chunk(digest, size)?;
+        }

-    let reader = DigestListEncoder::new(Box::new(index));
+        let reader = DigestListEncoder::new(Box::new(index));

-    let stream = WrappedReaderStream::new(reader);
+        let stream = WrappedReaderStream::new(reader);

-    // fixme: set size, content type?
-    let response = http::Response::builder()
-        .status(200)
-        .body(Body::wrap_stream(stream))?;
+        // fixme: set size, content type?
+        let response = http::Response::builder()
+            .status(200)
+            .body(Body::wrap_stream(stream))?;

-    Ok(Box::new(future::ok(response)))
+        Ok(response)
+    }.boxed()
}
#[sortable]
@@ -642,60 +646,62 @@ fn fixed_chunk_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let env: &BackupEnvironment = rpcenv.as_ref();
+    async move {
+        let env: &BackupEnvironment = rpcenv.as_ref();

-    let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
+        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

-    if !archive_name.ends_with(".fidx") {
-        bail!("wrong archive extension: '{}'", archive_name);
-    }
+        if !archive_name.ends_with(".fidx") {
+            bail!("wrong archive extension: '{}'", archive_name);
+        }

-    let empty_response = {
-        Response::builder()
-            .status(StatusCode::OK)
-            .body(Body::empty())?
-    };
+        let empty_response = {
+            Response::builder()
+                .status(StatusCode::OK)
+                .body(Body::empty())?
+        };

-    let last_backup = match &env.last_backup {
-        Some(info) => info,
-        None => return Ok(Box::new(future::ok(empty_response))),
-    };
+        let last_backup = match &env.last_backup {
+            Some(info) => info,
+            None => return Ok(empty_response),
+        };

-    let mut path = last_backup.backup_dir.relative_path();
-    path.push(&archive_name);
+        let mut path = last_backup.backup_dir.relative_path();
+        path.push(&archive_name);

-    let index = match env.datastore.open_fixed_reader(path) {
-        Ok(index) => index,
-        Err(_) => {
-            env.log(format!("there is no last backup for archive '{}'", archive_name));
-            return Ok(Box::new(future::ok(empty_response)));
-        }
-    };
+        let index = match env.datastore.open_fixed_reader(path) {
+            Ok(index) => index,
+            Err(_) => {
+                env.log(format!("there is no last backup for archive '{}'", archive_name));
+                return Ok(empty_response);
+            }
+        };

-    env.log(format!("download last backup index for archive '{}'", archive_name));
+        env.log(format!("download last backup index for archive '{}'", archive_name));

-    let count = index.index_count();
-    let image_size = index.index_bytes();
-    for pos in 0..count {
-        let digest = index.index_digest(pos).unwrap();
-        // Note: last chunk can be smaller
-        let start = (pos*index.chunk_size) as u64;
-        let mut end = start + index.chunk_size as u64;
-        if end > image_size { end = image_size; }
-        let size = (end - start) as u32;
-        env.register_chunk(*digest, size)?;
-    }
+        let count = index.index_count();
+        let image_size = index.index_bytes();
+        for pos in 0..count {
+            let digest = index.index_digest(pos).unwrap();
+            // Note: last chunk can be smaller
+            let start = (pos*index.chunk_size) as u64;
+            let mut end = start + index.chunk_size as u64;
+            if end > image_size { end = image_size; }
+            let size = (end - start) as u32;
+            env.register_chunk(*digest, size)?;
+        }

-    let reader = DigestListEncoder::new(Box::new(index));
+        let reader = DigestListEncoder::new(Box::new(index));

-    let stream = WrappedReaderStream::new(reader);
+        let stream = WrappedReaderStream::new(reader);

-    // fixme: set size, content type?
-    let response = http::Response::builder()
-        .status(200)
-        .body(Body::wrap_stream(stream))?;
+        // fixme: set size, content type?
+        let response = http::Response::builder()
+            .status(200)
+            .body(Body::wrap_stream(stream))?;

-    Ok(Box::new(future::ok(response)))
+        Ok(response)
+    }.boxed()
}


@@ -115,34 +115,30 @@ fn upload_fixed_chunk(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let wid = tools::required_integer_param(&param, "wid")? as usize;
-    let size = tools::required_integer_param(&param, "size")? as u32;
-    let encoded_size = tools::required_integer_param(&param, "encoded-size")? as u32;
+    async move {
+        let wid = tools::required_integer_param(&param, "wid")? as usize;
+        let size = tools::required_integer_param(&param, "size")? as u32;
+        let encoded_size = tools::required_integer_param(&param, "encoded-size")? as u32;

-    let digest_str = tools::required_string_param(&param, "digest")?;
-    let digest = proxmox::tools::hex_to_digest(digest_str)?;
+        let digest_str = tools::required_string_param(&param, "digest")?;
+        let digest = proxmox::tools::hex_to_digest(digest_str)?;

-    let env: &BackupEnvironment = rpcenv.as_ref();
+        let env: &BackupEnvironment = rpcenv.as_ref();

-    let upload = UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size);
+        let (digest, size, compressed_size, is_duplicate) =
+            UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;

-    let resp = upload
-        .then(move |result| {
-            let env: &BackupEnvironment = rpcenv.as_ref();
+        env.register_fixed_chunk(wid, digest, size, compressed_size, is_duplicate)?;
+        let digest_str = proxmox::tools::digest_to_hex(&digest);
+        env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));

-            let result = result.and_then(|(digest, size, compressed_size, is_duplicate)| {
-                env.register_fixed_chunk(wid, digest, size, compressed_size, is_duplicate)?;
-                let digest_str = proxmox::tools::digest_to_hex(&digest);
-                env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
-                Ok(json!(digest_str))
-            });
-
-            future::ok(env.format_response(result))
-        });
-
-    Ok(Box::new(resp))
+        let result = Ok(json!(digest_str));
+        Ok(env.format_response(result))
+    }
+    .boxed()
}
#[sortable]
@@ -177,34 +173,29 @@ fn upload_dynamic_chunk(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let wid = tools::required_integer_param(&param, "wid")? as usize;
-    let size = tools::required_integer_param(&param, "size")? as u32;
-    let encoded_size = tools::required_integer_param(&param, "encoded-size")? as u32;
+    async move {
+        let wid = tools::required_integer_param(&param, "wid")? as usize;
+        let size = tools::required_integer_param(&param, "size")? as u32;
+        let encoded_size = tools::required_integer_param(&param, "encoded-size")? as u32;

-    let digest_str = tools::required_string_param(&param, "digest")?;
-    let digest = proxmox::tools::hex_to_digest(digest_str)?;
+        let digest_str = tools::required_string_param(&param, "digest")?;
+        let digest = proxmox::tools::hex_to_digest(digest_str)?;

-    let env: &BackupEnvironment = rpcenv.as_ref();
+        let env: &BackupEnvironment = rpcenv.as_ref();

-    let upload = UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size);
+        let (digest, size, compressed_size, is_duplicate) =
+            UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size)
+                .await?;

-    let resp = upload
-        .then(move |result| {
-            let env: &BackupEnvironment = rpcenv.as_ref();
+        env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
+        let digest_str = proxmox::tools::digest_to_hex(&digest);
+        env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));

-            let result = result.and_then(|(digest, size, compressed_size, is_duplicate)| {
-                env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
-                let digest_str = proxmox::tools::digest_to_hex(&digest);
-                env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
-                Ok(json!(digest_str))
-            });
-
-            future::ok(env.format_response(result))
-        });
-
-    Ok(Box::new(resp))
+        let result = Ok(json!(digest_str));
+        Ok(env.format_response(result))
+    }.boxed()
}
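UploadChunk is a hand-written future, and the cleanup awaits it directly instead of wrapping it in .then(). A sketch of why that works, with a stand-in future; the Upload type below is illustrative, not the real UploadChunk:

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    struct Upload { size: u32 }

    impl Future for Upload {
        type Output = Result<(u32, bool), failure::Error>;

        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
            // The real UploadChunk polls the request body here.
            Poll::Ready(Ok((self.size, false)))
        }
    }

    async fn register() -> Result<(), failure::Error> {
        // `.await?` destructures the tuple the future resolves to,
        // replacing the old result.and_then(|(..)| ...) closure.
        let (size, _is_duplicate) = Upload { size: 123 }.await?;
        println!("upload_chunk done: {} bytes", size);
        Ok(())
    }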
pub const API_METHOD_UPLOAD_SPEEDTEST: ApiMethod = ApiMethod::new(
@@ -218,29 +209,30 @@ fn upload_speedtest(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let resp = req_body
-        .map_err(Error::from)
-        .try_fold(0, |size: usize, chunk| {
-            let sum = size + chunk.len();
-            //println!("UPLOAD {} bytes, sum {}", chunk.len(), sum);
-            future::ok::<usize, Error>(sum)
-        })
-        .then(move |result| {
-            match result {
-                Ok(size) => {
-                    println!("UPLOAD END {} bytes", size);
-                }
-                Err(err) => {
-                    println!("Upload error: {}", err);
-                }
-            }
-            let env: &BackupEnvironment = rpcenv.as_ref();
-            future::ok(env.format_response(Ok(Value::Null)))
-        });
+    async move {
+        let result = req_body
+            .map_err(Error::from)
+            .try_fold(0, |size: usize, chunk| {
+                let sum = size + chunk.len();
+                //println!("UPLOAD {} bytes, sum {}", chunk.len(), sum);
+                future::ok::<usize, Error>(sum)
+            })
+            .await;

-    Ok(Box::new(resp))
+        match result {
+            Ok(size) => {
+                println!("UPLOAD END {} bytes", size);
+            }
+            Err(err) => {
+                println!("Upload error: {}", err);
+            }
+        }
+        let env: &BackupEnvironment = rpcenv.as_ref();
+        Ok(env.format_response(Ok(Value::Null)))
+    }.boxed()
}
#[sortable]
@@ -265,40 +257,32 @@ fn upload_blob(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
-    let encoded_size = tools::required_integer_param(&param, "encoded-size")? as usize;
+    async move {
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+        let encoded_size = tools::required_integer_param(&param, "encoded-size")? as usize;

-    let env: &BackupEnvironment = rpcenv.as_ref();
+        let env: &BackupEnvironment = rpcenv.as_ref();

-    if !file_name.ends_with(".blob") {
-        bail!("wrong blob file extension: '{}'", file_name);
-    }
+        if !file_name.ends_with(".blob") {
+            bail!("wrong blob file extension: '{}'", file_name);
+        }

-    let env2 = env.clone();
-    let env3 = env.clone();
+        let data = req_body
+            .map_err(Error::from)
+            .try_fold(Vec::new(), |mut acc, chunk| {
+                acc.extend_from_slice(&*chunk);
+                future::ok::<_, Error>(acc)
+            })
+            .await?;

-    let resp = req_body
-        .map_err(Error::from)
-        .try_fold(Vec::new(), |mut acc, chunk| {
-            acc.extend_from_slice(&*chunk);
-            future::ok::<_, Error>(acc)
-        })
-        .and_then(move |data| async move {
-            if encoded_size != data.len() {
-                bail!("got blob with unexpected length ({} != {})", encoded_size, data.len());
-            }
+        if encoded_size != data.len() {
+            bail!("got blob with unexpected length ({} != {})", encoded_size, data.len());
+        }

-            env2.add_blob(&file_name, data)?;
+        env.add_blob(&file_name, data)?;

-            Ok(())
-        })
-        .and_then(move |_| {
-            future::ok(env3.format_response(Ok(Value::Null)))
-        })
-        ;
-
-    Ok(Box::new(resp))
+        Ok(env.format_response(Ok(Value::Null)))
+    }.boxed()
}


@@ -49,92 +49,94 @@ fn upgrade_to_backup_reader_protocol(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let debug = param["debug"].as_bool().unwrap_or(false);
+    async move {
+        let debug = param["debug"].as_bool().unwrap_or(false);

-    let store = tools::required_string_param(&param, "store")?.to_owned();
-    let datastore = DataStore::lookup_datastore(&store)?;
+        let store = tools::required_string_param(&param, "store")?.to_owned();
+        let datastore = DataStore::lookup_datastore(&store)?;

-    let backup_type = tools::required_string_param(&param, "backup-type")?;
-    let backup_id = tools::required_string_param(&param, "backup-id")?;
-    let backup_time = tools::required_integer_param(&param, "backup-time")?;
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let protocols = parts
-        .headers
-        .get("UPGRADE")
-        .ok_or_else(|| format_err!("missing Upgrade header"))?
-        .to_str()?;
+        let protocols = parts
+            .headers
+            .get("UPGRADE")
+            .ok_or_else(|| format_err!("missing Upgrade header"))?
+            .to_str()?;

-    if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() {
-        bail!("invalid protocol name");
-    }
+        if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() {
+            bail!("invalid protocol name");
+        }

-    if parts.version >= http::version::Version::HTTP_2 {
-        bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
-    }
+        if parts.version >= http::version::Version::HTTP_2 {
+            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
+        }

-    let username = rpcenv.get_user().unwrap();
-    let env_type = rpcenv.env_type();
+        let username = rpcenv.get_user().unwrap();
+        let env_type = rpcenv.env_type();

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
-    let path = datastore.base_path();
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let path = datastore.base_path();

-    //let files = BackupInfo::list_files(&path, &backup_dir)?;
+        //let files = BackupInfo::list_files(&path, &backup_dir)?;

-    let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());
+        let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());

-    WorkerTask::spawn("reader", Some(worker_id), &username.clone(), true, move |worker| {
-        let mut env = ReaderEnvironment::new(
-            env_type, username.clone(), worker.clone(), datastore, backup_dir);
+        WorkerTask::spawn("reader", Some(worker_id), &username.clone(), true, move |worker| {
+            let mut env = ReaderEnvironment::new(
+                env_type, username.clone(), worker.clone(), datastore, backup_dir);

-        env.debug = debug;
+            env.debug = debug;

-        env.log(format!("starting new backup reader datastore '{}': {:?}", store, path));
+            env.log(format!("starting new backup reader datastore '{}': {:?}", store, path));

-        let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
+            let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);

-        let abort_future = worker.abort_future();
+            let abort_future = worker.abort_future();

-        let req_fut = req_body
-            .on_upgrade()
-            .map_err(Error::from)
-            .and_then({
-                let env = env.clone();
-                move |conn| {
-                    env.debug("protocol upgrade done");
+            let req_fut = req_body
+                .on_upgrade()
+                .map_err(Error::from)
+                .and_then({
+                    let env = env.clone();
+                    move |conn| {
+                        env.debug("protocol upgrade done");

-                    let mut http = hyper::server::conn::Http::new();
-                    http.http2_only(true);
-                    // increase window size: todo - find optiomal size
-                    let window_size = 32*1024*1024; // max = (1 << 31) - 2
-                    http.http2_initial_stream_window_size(window_size);
-                    http.http2_initial_connection_window_size(window_size);
+                        let mut http = hyper::server::conn::Http::new();
+                        http.http2_only(true);
+                        // increase window size: todo - find optiomal size
+                        let window_size = 32*1024*1024; // max = (1 << 31) - 2
+                        http.http2_initial_stream_window_size(window_size);
+                        http.http2_initial_connection_window_size(window_size);

-                    http.serve_connection(conn, service)
-                        .map_err(Error::from)
-                }
-            });
-        let abort_future = abort_future
-            .map(|_| Err(format_err!("task aborted")));
+                        http.serve_connection(conn, service)
+                            .map_err(Error::from)
+                    }
+                });
+            let abort_future = abort_future
+                .map(|_| Err(format_err!("task aborted")));

-        use futures::future::Either;
-        futures::future::select(req_fut, abort_future)
-            .map(|res| match res {
-                Either::Left((Ok(res), _)) => Ok(res),
-                Either::Left((Err(err), _)) => Err(err),
-                Either::Right((Ok(res), _)) => Ok(res),
-                Either::Right((Err(err), _)) => Err(err),
-            })
-            .map_ok(move |_| env.log("reader finished sucessfully"))
-    })?;
+            use futures::future::Either;
+            futures::future::select(req_fut, abort_future)
+                .map(|res| match res {
+                    Either::Left((Ok(res), _)) => Ok(res),
+                    Either::Left((Err(err), _)) => Err(err),
+                    Either::Right((Ok(res), _)) => Ok(res),
+                    Either::Right((Err(err), _)) => Err(err),
+                })
+                .map_ok(move |_| env.log("reader finished sucessfully"))
+        })?;

-    let response = Response::builder()
-        .status(StatusCode::SWITCHING_PROTOCOLS)
-        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
-        .body(Body::empty())?;
+        let response = Response::builder()
+            .status(StatusCode::SWITCHING_PROTOCOLS)
+            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
+            .body(Body::empty())?;

-    Ok(Box::new(futures::future::ok(response)))
+        Ok(response)
+    }.boxed()
}
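Inside the worker closure the select()/Either normalization stays combinator-based, since the closure returns a future rather than awaiting one. A reduced sketch of that arrangement, assuming futures 0.3; both arms are stand-ins, whereas the real code races the upgraded connection against worker.abort_future():

    use failure::{format_err, Error};
    use futures::future::{self, Either, FutureExt};

    async fn run() -> Result<(), Error> {
        let req_fut = future::ready(Ok::<(), Error>(()));
        let abort_future = future::pending::<()>()
            .map(|_| -> Result<(), Error> { Err(format_err!("task aborted")) });

        // select() wants Unpin futures; Box::pin satisfies that, and
        // Either reports which side finished first.
        future::select(Box::pin(req_fut), Box::pin(abort_future))
            .map(|res| match res {
                Either::Left((res, _)) => res,
                Either::Right((res, _)) => res,
            })
            .await
    }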
pub const READER_API_ROUTER: Router = Router::new()
@@ -170,38 +172,38 @@ fn download_file(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let env: &ReaderEnvironment = rpcenv.as_ref();
-    let env2 = env.clone();
+    async move {
+        let env: &ReaderEnvironment = rpcenv.as_ref();

-    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

-    let mut path = env.datastore.base_path();
-    path.push(env.backup_dir.relative_path());
-    path.push(&file_name);
+        let mut path = env.datastore.base_path();
+        path.push(env.backup_dir.relative_path());
+        path.push(&file_name);

-    let path2 = path.clone();
-    let path3 = path.clone();
+        let path2 = path.clone();
+        let path3 = path.clone();

-    let response_future = tokio::fs::File::open(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
-        .and_then(move |file| {
-            env2.log(format!("download {:?}", path3));
-            let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
-                .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
+        let file = tokio::fs::File::open(path)
+            .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
+            .await?;

-            let body = Body::wrap_stream(payload);
+        env.log(format!("download {:?}", path3));

-            // fixme: set other headers ?
-            futures::future::ok(Response::builder()
-               .status(StatusCode::OK)
-               .header(header::CONTENT_TYPE, "application/octet-stream")
-               .body(body)
-               .unwrap())
-        });
+        let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
+            .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));

-    Ok(Box::new(response_future))
+        let body = Body::wrap_stream(payload);
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
}
#[sortable]
@@ -221,33 +223,32 @@ fn download_chunk(
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

-    let env: &ReaderEnvironment = rpcenv.as_ref();
+    async move {
+        let env: &ReaderEnvironment = rpcenv.as_ref();

-    let digest_str = tools::required_string_param(&param, "digest")?;
-    let digest = proxmox::tools::hex_to_digest(digest_str)?;
+        let digest_str = tools::required_string_param(&param, "digest")?;
+        let digest = proxmox::tools::hex_to_digest(digest_str)?;

-    let (path, _) = env.datastore.chunk_path(&digest);
-    let path2 = path.clone();
+        let (path, _) = env.datastore.chunk_path(&digest);
+        let path2 = path.clone();

-    env.debug(format!("download chunk {:?}", path));
+        env.debug(format!("download chunk {:?}", path));

-    let response_future = tokio::fs::read(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("reading file {:?} failed: {}", path2, err)))
-        .and_then(move |data| {
-            let body = Body::from(data);
+        let data = tokio::fs::read(path)
+            .map_err(move |err| http_err!(BAD_REQUEST, format!("reading file {:?} failed: {}", path2, err)))
+            .await?;

-            // fixme: set other headers ?
-            futures::future::ok(
-                Response::builder()
-                    .status(StatusCode::OK)
-                    .header(header::CONTENT_TYPE, "application/octet-stream")
-                    .body(body)
-                    .unwrap())
-        });
+        let body = Body::from(data);

-    Ok(Box::new(response_future))
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
}
/* this is too slow
@@ -302,7 +303,7 @@ fn speedtest(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<ApiFuture, Error> {
+) -> ApiFuture {

    let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A...]
@@ -314,5 +315,5 @@ fn speedtest(
        .body(body)
        .unwrap();

-    Ok(Box::new(future::ok(response)))
+    future::ok(response).boxed()
}