use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::future::{self, FutureExt, TryFutureExt};
use futures::stream::TryStreamExt;
use hyper::body::HttpBody;
use hyper::header::{self, HeaderMap};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use lazy_static::lazy_static;
use regex::Regex;
use serde_json::Value;
use tokio::fs::File;
use tokio::time::Instant;
use tower_service::Service;
use url::form_urlencoded;

use proxmox_router::http_err;
use proxmox_router::{
    check_api_permission, ApiHandler, ApiMethod, HttpError, Permission, RpcEnvironment,
    RpcEnvironmentType, UserInformation,
};
use proxmox_schema::{ObjectSchemaType, ParameterSchema};

use proxmox_http::client::RateLimitedStream;

use proxmox_async::stream::AsyncReaderStream;
use proxmox_compression::{DeflateEncoder, Level};

use crate::{
    formatter::*, normalize_uri_path, ApiConfig, AuthError, CompressionMethod, FileLogger,
    RestEnvironment,
};

extern "C" {
    fn tzset();
}

struct AuthStringExtension(String);

struct EmptyUserInformation {}

impl UserInformation for EmptyUserInformation {
    fn is_superuser(&self, _userid: &str) -> bool {
        false
    }
    fn is_group_member(&self, _userid: &str, _group: &str) -> bool {
        false
    }
    fn lookup_privs(&self, _userid: &str, _path: &[&str]) -> u64 {
        0
    }
}

/// REST server implementation (configured with [ApiConfig])
///
/// This struct implements the [Service] trait in order to use it with
/// [hyper::server::Builder::serve].
pub struct RestServer {
    api_config: Arc<ApiConfig>,
}

// Allow a generous 3072 characters as combined length of URI path and query;
// over-long requests get a proper 414 (URI Too Long) response below. This
// also keeps access-log messages below PIPE_BUF (4 KiB on Linux), so we can
// rely on atomic writes for O_APPEND opened logfiles.
const MAX_URI_QUERY_LENGTH: usize = 3072;
const CHUNK_SIZE_LIMIT: u64 = 32 * 1024;

impl RestServer {
    /// Creates a new instance.
    pub fn new(api_config: ApiConfig) -> Self {
        Self {
            api_config: Arc::new(api_config),
        }
    }
}
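
// Illustrative usage sketch (not from the original source): with a prepared
// `ApiConfig` and a hyper `AddrIncoming`, the server is typically driven via
// hyper's builder, which invokes the connection-level `Service` impls below
// once per accepted connection:
//
//     let rest_server = RestServer::new(api_config);
//     hyper::Server::builder(incoming).serve(rest_server).await?;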

impl Service<&Pin<Box<tokio_openssl::SslStream<RateLimitedStream<tokio::net::TcpStream>>>>>
    for RestServer
{
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<RateLimitedStream<tokio::net::TcpStream>>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl Service<&Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl Service<&hyper::server::conn::AddrStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, ctx: &hyper::server::conn::AddrStream) -> Self::Future {
        let peer = ctx.remote_addr();
        future::ok(ApiService {
            peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

impl Service<&tokio::net::UnixStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _ctx: &tokio::net::UnixStream) -> Self::Future {
        // TODO: Find a way to actually represent the vsock peer in the ApiService struct - for
        // now it doesn't really matter, so just use a fake IP address
        let fake_peer = "0.0.0.0:807".parse().unwrap();
        future::ok(ApiService {
            peer: fake_peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

// Helper [Service] containing the peer address.
//
// The lower-level connection [Service] implementations on [RestServer]
// extract the peer address and return an [ApiService].
//
// Rust wants this type 'pub' here (else we get 'private type `ApiService`
// in public interface'). The type is still private because the crate does
// not export it.
pub struct ApiService {
    pub peer: std::net::SocketAddr,
    pub api_config: Arc<ApiConfig>,
}

fn log_response(
    logfile: Option<&Arc<Mutex<FileLogger>>>,
    peer: &std::net::SocketAddr,
    method: hyper::Method,
    path_query: &str,
    resp: &Response<Body>,
    user_agent: Option<String>,
) {
    if resp.extensions().get::<NoLogExtension>().is_some() {
        return;
    };

    // we also log URI-too-long requests, so avoid messages bigger than PIPE_BUF (4k on Linux)
    // to profit from atomicity guarantees for O_APPEND opened logfiles
    let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];

    let status = resp.status();
    if !(status.is_success() || status.is_informational()) {
        let reason = status.canonical_reason().unwrap_or("unknown reason");

        let message = match resp.extensions().get::<ErrorMessageExtension>() {
            Some(data) => &data.0,
            None => "request failed",
        };

        log::error!(
            "{} {}: {} {}: [client {}] {}",
            method.as_str(),
            path,
            status.as_str(),
            reason,
            peer,
            message
        );
    }
    if let Some(logfile) = logfile {
        let auth_id = match resp.extensions().get::<AuthStringExtension>() {
            Some(AuthStringExtension(auth_id)) => auth_id.clone(),
            None => "-".to_string(),
        };
        let now = proxmox_time::epoch_i64();
        // time format which apache/nginx use (by default), copied from pve-http-server
        let datetime = proxmox_time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
            .unwrap_or_else(|_| "-".to_string());

        logfile.lock().unwrap().log(format!(
            "{} - {} [{}] \"{} {}\" {} {} {}",
            peer.ip(),
            auth_id,
            datetime,
            method.as_str(),
            path,
            status.as_str(),
            resp.body().size_hint().lower(),
            user_agent.unwrap_or_else(|| "-".to_string()),
        ));
    }
}
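
// Example of a resulting access-log line (illustrative values only):
//
//     192.0.2.1 - root@pam [16/10/2020:09:06:46 +0000] "GET /api2/json/version" 200 88 curl/7.74.0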

fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
    lazy_static! {
        static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
    }
    let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
    let capture = RE.captures(forwarded)?;
    let rhost = capture.get(1)?.as_str();

    rhost.parse().ok()
}
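
// A minimal test sketch (not in the original source) for the RFC 7239
// `Forwarded` header shape produced by proxy_protected_request() below:
#[cfg(test)]
mod test_get_proxied_peer {
    use super::*;

    #[test]
    fn parses_forwarded_for_pair() {
        let mut headers = HeaderMap::new();
        headers.insert(header::FORWARDED, "for=\"127.0.0.1:1234\";".parse().unwrap());
        assert_eq!(
            get_proxied_peer(&headers),
            Some("127.0.0.1:1234".parse().unwrap())
        );
    }
}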

fn get_user_agent(headers: &HeaderMap) -> Option<String> {
    let agent = headers.get(header::USER_AGENT)?.to_str();
    agent
        .map(|s| {
            let mut s = s.to_owned();
            s.truncate(128);
            s
        })
        .ok()
}

impl Service<Request<Body>> for ApiService {
    type Response = Response<Body>;
    type Error = Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let path = req.uri().path_and_query().unwrap().as_str().to_owned();
        let method = req.method().clone();
        let user_agent = get_user_agent(req.headers());

        let config = Arc::clone(&self.api_config);
        let peer = match get_proxied_peer(req.headers()) {
            Some(proxied_peer) => proxied_peer,
            None => self.peer,
        };
        async move {
            let response = match handle_request(Arc::clone(&config), req, &peer).await {
                Ok(response) => response,
                Err(err) => {
                    let (err, code) = match err.downcast_ref::<HttpError>() {
                        Some(apierr) => (apierr.message.clone(), apierr.code),
                        _ => (err.to_string(), StatusCode::BAD_REQUEST),
                    };
                    Response::builder()
                        .status(code)
                        .extension(ErrorMessageExtension(err.to_string()))
                        .body(err.into())?
                }
            };
            let logger = config.get_access_log();
            log_response(logger, &peer, method, &path, &response, user_agent);
            Ok(response)
        }
        .boxed()
    }
}

fn parse_query_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    form: &str, // x-www-form-urlencoded body data
    parts: &Parts,
    uri_param: &HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut param_list: Vec<(String, String)> = vec![];

    if !form.is_empty() {
        for (k, v) in form_urlencoded::parse(form.as_bytes()).into_owned() {
            param_list.push((k, v));
        }
    }

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "_dc" {
                continue;
            } // skip extjs "disable cache" parameter
            param_list.push((k, v));
        }
    }

    for (k, v) in uri_param {
        param_list.push((k.clone(), v.clone()));
    }

    let params = param_schema.parse_parameter_strings(&param_list, true)?;

    Ok(params)
}
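
// Precedence sketch (illustrative): for a request to
// `/api2/json/nodes/{node}/tasks?limit=50&_dc=123` with an urlencoded body of
// `errors=1`, the list built above is [("errors", "1"), ("limit", "50"),
// ("node", ...)]: form body first, then the query string (minus the ExtJS
// `_dc` cache-buster), then URI-matched parameters, followed by a single
// schema-checked parse at the end.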

async fn get_request_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut is_json = false;

    if let Some(value) = parts.headers.get(header::CONTENT_TYPE) {
        match value.to_str().map(|v| v.split(';').next()) {
            Ok(Some("application/x-www-form-urlencoded")) => {
                is_json = false;
            }
            Ok(Some("application/json")) => {
                is_json = true;
            }
            _ => bail!("unsupported content type {:?}", value.to_str()),
        }
    }

    let body = TryStreamExt::map_err(req_body, |err| {
        http_err!(BAD_REQUEST, "Problems reading request body: {}", err)
    })
    .try_fold(Vec::new(), |mut acc, chunk| async move {
        // FIXME: max request body size?
        if acc.len() + chunk.len() < 64 * 1024 {
            acc.extend_from_slice(&*chunk);
            Ok(acc)
        } else {
            Err(http_err!(BAD_REQUEST, "Request body too large"))
        }
    })
    .await?;

    let utf8_data =
        std::str::from_utf8(&body).map_err(|err| format_err!("Request body not utf8: {}", err))?;

    if is_json {
        let mut params: Value = serde_json::from_str(utf8_data)?;
        for (k, v) in uri_param {
            if let Some((_optional, prop_schema)) = param_schema.lookup(&k) {
                params[&k] = prop_schema.parse_simple_value(&v)?;
            }
        }
        param_schema.verify_json(&params)?;
        Ok(params)
    } else {
        parse_query_parameters(param_schema, utf8_data, &parts, &uri_param)
    }
}

struct NoLogExtension();

async fn proxy_protected_request(
    info: &'static ApiMethod,
    mut parts: Parts,
    req_body: Body,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let mut uri_parts = parts.uri.clone().into_parts();

    uri_parts.scheme = Some(http::uri::Scheme::HTTP);
    uri_parts.authority = Some(http::uri::Authority::from_static("127.0.0.1:82"));
    let new_uri = http::Uri::from_parts(uri_parts).unwrap();

    parts.uri = new_uri;

    let mut request = Request::from_parts(parts, req_body);
    request.headers_mut().insert(
        header::FORWARDED,
        format!("for=\"{}\";", peer).parse().unwrap(),
    );

    let reload_timezone = info.reload_timezone;

    let resp = hyper::client::Client::new()
        .request(request)
        .map_err(Error::from)
        .map_ok(|mut resp| {
            resp.extensions_mut().insert(NoLogExtension());
            resp
        })
        .await?;

    if reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}
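
// Flow note (summarizing the code above): protected API calls reaching a
// public-facing daemon are re-issued as plain HTTP requests to the privileged
// daemon on 127.0.0.1:82; the original client address travels along in a
// `Forwarded` header, which get_proxied_peer() recovers on the other side.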

pub(crate) async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher + Send>(
    mut rpcenv: Env,
    info: &'static ApiMethod,
    formatter: &'static dyn OutputFormatter,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Response<Body>, Error> {
    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let compression = extract_compression_method(&parts.headers);

    let result = match info.handler {
        ApiHandler::AsyncHttp(handler) => {
            let params = parse_query_parameters(info.parameters, "", &parts, &uri_param)?;
            (handler)(parts, req_body, params, info, Box::new(rpcenv)).await
        }
        ApiHandler::StreamingSync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .and_then(|data| formatter.format_data_streaming(data, &rpcenv))
        }
        ApiHandler::StreamingAsync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .await
                .and_then(|data| formatter.format_data_streaming(data, &rpcenv))
        }
        ApiHandler::Sync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv).map(|data| formatter.format_data(data, &rpcenv))
        }
        ApiHandler::Async(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .await
                .map(|data| formatter.format_data(data, &rpcenv))
        }
    };

    let mut resp = match result {
        Ok(resp) => resp,
        Err(err) => {
            if let Some(httperr) = err.downcast_ref::<HttpError>() {
                if httperr.code == StatusCode::UNAUTHORIZED {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
            }
            formatter.format_error(err)
        }
    };

    let resp = match compression {
        Some(CompressionMethod::Deflate) => {
            resp.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            resp.map(|body| {
                Body::wrap_stream(DeflateEncoder::with_quality(
                    TryStreamExt::map_err(body, |err| {
                        proxmox_lang::io_format_err!("error during compression: {}", err)
                    }),
                    // the default (not fastest) level trades slightly more CPU
                    // time for a noticeably better compression ratio, which
                    // dominates load times on slow client connections
                    Level::Default,
                ))
            })
        }
        None => resp,
    };

    if info.reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
    if let Some(ext) = filename.extension().and_then(|osstr| osstr.to_str()) {
        return match ext {
            "css" => ("text/css", false),
            "html" => ("text/html", false),
            "js" => ("application/javascript", false),
            "json" => ("application/json", false),
            "map" => ("application/json", false),
            "png" => ("image/png", true),
            "ico" => ("image/x-icon", true),
            "gif" => ("image/gif", true),
            "svg" => ("image/svg+xml", false),
            "jar" => ("application/java-archive", true),
            "woff" => ("application/font-woff", true),
            "woff2" => ("application/font-woff2", true),
            "ttf" => ("application/font-sfnt", true),
            "pdf" => ("application/pdf", true),
            "epub" => ("application/epub+zip", true),
            "mp3" => ("audio/mpeg", true),
            "oga" => ("audio/ogg", true),
            "tgz" => ("application/x-compressed-tar", true),
            _ => ("application/octet-stream", false),
        };
    }

    ("application/octet-stream", false)
}
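
// Minimal usage sketch (not in the original source):
#[cfg(test)]
mod test_extension_to_content_type {
    use super::*;

    #[test]
    fn maps_known_and_unknown_extensions() {
        assert_eq!(
            extension_to_content_type(Path::new("index.html")),
            ("text/html", false)
        );
        // the `true` flag disables transfer compression (data is already compressed)
        assert_eq!(
            extension_to_content_type(Path::new("logo.png")),
            ("image/png", true)
        );
        // unknown extensions fall back to a generic type
        assert_eq!(
            extension_to_content_type(Path::new("some.unknown")),
            ("application/octet-stream", false)
        );
    }
}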

async fn simple_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    use tokio::io::AsyncReadExt;

    let mut file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let mut data: Vec<u8> = Vec::new();

    let mut response = match compression {
        Some(CompressionMethod::Deflate) => {
            let mut enc = DeflateEncoder::with_quality(data, Level::Default);
            enc.compress_vec(&mut file, CHUNK_SIZE_LIMIT as usize)
                .await?;
            let mut response = Response::new(enc.into_inner().into());
            response.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            response
        }
        None => {
            file.read_to_end(&mut data)
                .await
                .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
            Response::new(data.into())
        }
    };

    response.headers_mut().insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static(content_type),
    );

    Ok(response)
}

async fn chunked_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, content_type);

    let file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let body = match compression {
        Some(CompressionMethod::Deflate) => {
            resp = resp.header(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            Body::wrap_stream(DeflateEncoder::with_quality(
                AsyncReaderStream::new(file),
                Level::Default,
            ))
        }
        None => Body::wrap_stream(AsyncReaderStream::new(file)),
    };

    Ok(resp.body(body).unwrap())
}

async fn handle_static_file_download(
    filename: PathBuf,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let metadata = tokio::fs::metadata(filename.clone())
        .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
        .await?;

    let (content_type, nocomp) = extension_to_content_type(&filename);
    let compression = if nocomp { None } else { compression };

    if metadata.len() < CHUNK_SIZE_LIMIT {
        simple_static_file_download(filename, content_type, compression).await
    } else {
        chunked_static_file_download(filename, content_type, compression).await
    }
}
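
// Sizing note (summarizing the code above): files smaller than
// CHUNK_SIZE_LIMIT (32 KiB) are read and, if applicable, compressed fully in
// memory; anything larger is streamed chunk-wise via AsyncReaderStream.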

// FIXME: support handling multiple compression methods
fn extract_compression_method(headers: &http::HeaderMap) -> Option<CompressionMethod> {
    if let Some(Ok(encodings)) = headers.get(header::ACCEPT_ENCODING).map(|v| v.to_str()) {
        for encoding in encodings.split(&[',', ' '][..]) {
            if let Ok(method) = encoding.parse() {
                return Some(method);
            }
        }
    }
    None
}
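
// Behaviour sketch (illustrative, not from the original source): for
// `Accept-Encoding: gzip, deflate, br` the loop above returns the first
// token that parses into a supported `CompressionMethod`; tokens carrying
// quality values (e.g. `deflate;q=0.5`) fail to parse and are skipped,
// hence the FIXME above about proper negotiation.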

async fn handle_request(
    api: Arc<ApiConfig>,
    req: Request<Body>,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let (parts, body) = req.into_parts();
    let method = parts.method.clone();
    let (path, components) = normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

    let query = parts.uri.query().unwrap_or_default();
    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
        return Ok(Response::builder()
            .status(StatusCode::URI_TOO_LONG)
            .body("".into())
            .unwrap());
    }

    let env_type = api.env_type();
    let mut rpcenv = RestEnvironment::new(env_type, Arc::clone(&api));

    rpcenv.set_client_ip(Some(*peer));

    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let access_forbidden_time = std::time::Instant::now() + std::time::Duration::from_millis(500);

    if comp_len >= 1 && components[0] == "api2" {
        if comp_len >= 2 {
            let format = components[1];

            let formatter: &dyn OutputFormatter = match format {
                "json" => JSON_FORMATTER,
                "extjs" => EXTJS_FORMATTER,
                _ => bail!("Unsupported output format '{}'.", format),
            };

            let mut uri_param = HashMap::new();
            let api_method = api.find_method(&components[2..], method.clone(), &mut uri_param);

            let mut auth_required = true;
            if let Some(api_method) = api_method {
                if let Permission::World = *api_method.access.permission {
                    auth_required = false; // no auth for endpoints with World permission
                }
            }

            let mut user_info: Box<dyn UserInformation + Send + Sync> =
                Box::new(EmptyUserInformation {});

            if auth_required {
                match api.check_auth(&parts.headers, &method).await {
                    Ok((authid, info)) => {
                        rpcenv.set_auth_id(Some(authid));
                        user_info = info;
                    }
                    Err(auth_err) => {
                        let err = match auth_err {
                            AuthError::Generic(err) => err,
                            AuthError::NoData => {
                                format_err!("no authentication credentials provided.")
                            }
                        };
                        // fixme: log Username??
                        rpcenv.log_failed_auth(None, &err.to_string());

                        // always delay unauthorized calls by 3 seconds (from start of request)
                        let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
                        tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                        return Ok(formatter.format_error(err));
                    }
                }
            }

            match api_method {
                None => {
                    let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
                    return Ok(formatter.format_error(err));
                }
                Some(api_method) => {
                    let auth_id = rpcenv.get_auth_id();
                    let user_info = user_info;

                    if !check_api_permission(
                        api_method.access.permission,
                        auth_id.as_deref(),
                        &uri_param,
                        user_info.as_ref(),
                    ) {
                        let err = http_err!(FORBIDDEN, "permission check failed");
                        tokio::time::sleep_until(Instant::from_std(access_forbidden_time)).await;
                        return Ok(formatter.format_error(err));
                    }

                    let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
                        proxy_protected_request(api_method, parts, body, peer).await
                    } else {
                        handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param)
                            .await
                    };

                    let mut response = match result {
                        Ok(resp) => resp,
                        Err(err) => formatter.format_error(err),
                    };

                    if let Some(auth_id) = auth_id {
                        response
                            .extensions_mut()
                            .insert(AuthStringExtension(auth_id));
                    }

                    return Ok(response);
                }
            }
        }
    } else {
        // no auth required for accessing files!

        if method != hyper::Method::GET {
            bail!("Unsupported HTTP method {}", method);
        }

        if comp_len == 0 {
            match api.check_auth(&parts.headers, &method).await {
                Ok((auth_id, _user_info)) => {
                    rpcenv.set_auth_id(Some(auth_id));
                    return Ok(api.get_index(rpcenv, parts).await);
                }
                Err(AuthError::Generic(_)) => {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
                Err(AuthError::NoData) => {}
            }
            return Ok(api.get_index(rpcenv, parts).await);
        } else {
            let filename = api.find_alias(&components);
            let compression = extract_compression_method(&parts.headers);
            return handle_static_file_download(filename, compression).await;
        }
    }

    Err(http_err!(NOT_FOUND, "Path '{}' not found.", path))
}