use std::collections::HashSet;
use std::io::Write;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};

use chrono::{DateTime, Utc};
use failure::*;
use futures::*;
use futures::stream::Stream;
use http::Uri;
use http::{Request, Response};
use http::header::HeaderValue;
use hyper::Body;
use hyper::client::Client;
use openssl::ssl::{SslConnector, SslMethod};
use serde_json::{json, Value};
use tokio::sync::mpsc;
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use xdg::BaseDirectories;

use proxmox::tools::{
    digest_to_hex,
    fs::{file_get_json, file_set_contents},
};

use crate::backup::*;
use crate::tools::{self, BroadcastFuture, tty};
use crate::tools::futures::{cancellable, Canceller};

use super::pipe_to_stream::*;
use super::merge_known_chunks::*;

/// Credentials obtained from a successful login (ticket and CSRF prevention token)
#[derive(Clone)]
struct AuthInfo {
    username: String,
    ticket: String,
    token: String,
}

/// HTTP(S) API client
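///
/// A minimal usage sketch (assumptions: a tokio 0.1 runtime drives the
/// returned futures, and the server name, user and API path below are
/// placeholders):
///
/// ```ignore
/// let client = HttpClient::new("localhost", "root@pam")?;
/// let version = client.get("/api2/json/version", None).wait()?;
/// ```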
pub struct HttpClient {
    client: Client<hyper_openssl::HttpsConnector<hyper::client::HttpConnector>>,
    server: String,
    auth: BroadcastFuture<AuthInfo>,
}
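
// Login tickets are cached in a per-user runtime file (usually below
// /run/user/<uid>), so consecutive client invocations can skip the
// password prompt while a ticket is still valid.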

fn store_ticket_info(server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> {

    let base = BaseDirectories::with_prefix("proxmox-backup")?;

    // usually /run/user/<uid>/...
    let path = base.place_runtime_file("tickets")?;

    let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);

    let mut data = file_get_json(&path, Some(json!({})))?;

    let now = Utc::now().timestamp();

    data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token});

    let mut new_data = json!({});

    let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;

    // Copy only entries which have not expired yet, so the cache file
    // does not accumulate stale tickets.
    let empty = serde_json::map::Map::new();
    for (server, info) in data.as_object().unwrap_or(&empty) {
        for (user, uinfo) in info.as_object().unwrap_or(&empty) {
            if let Some(timestamp) = uinfo["timestamp"].as_i64() {
                let age = now - timestamp;
                if age < ticket_lifetime {
                    new_data[server][user] = uinfo.clone();
                }
            }
        }
    }

    file_set_contents(path, new_data.to_string().as_bytes(), Some(mode))?;

    Ok(())
}

fn load_ticket_info(server: &str, username: &str) -> Option<(String, String)> {
    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(b) => b,
        _ => return None,
    };

    // usually /run/user/<uid>/...
    let path = match base.place_runtime_file("tickets") {
        Ok(p) => p,
        _ => return None,
    };

    let data = match file_get_json(&path, None) {
        Ok(v) => v,
        _ => return None,
    };

    let now = Utc::now().timestamp();

    let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;

    if let Some(uinfo) = data[server][username].as_object() {
        if let Some(timestamp) = uinfo["timestamp"].as_i64() {
            let age = now - timestamp;
            if age < ticket_lifetime {
                let ticket = match uinfo["ticket"].as_str() {
                    Some(t) => t,
                    None => return None,
                };
                let token = match uinfo["token"].as_str() {
                    Some(t) => t,
                    None => return None,
                };
                return Some((ticket.to_owned(), token.to_owned()));
            }
        }
    }

    None
}

impl HttpClient {

    pub fn new(server: &str, username: &str) -> Result<Self, Error> {
        let client = Self::build_client();

        // Reuse a cached ticket as password if one is still valid.
        let password = if let Some((ticket, _token)) = load_ticket_info(server, username) {
            ticket
        } else {
            Self::get_password(&username)?
        };

        let login = Self::credentials(client.clone(), server.to_owned(), username.to_owned(), password);

        Ok(Self {
            client,
            server: String::from(server),
            auth: BroadcastFuture::new(login),
        })
    }

    fn get_password(_username: &str) -> Result<String, Error> {
        use std::env::VarError::*;
        match std::env::var("PBS_PASSWORD") {
            Ok(p) => return Ok(p),
            Err(NotUnicode(_)) => bail!("PBS_PASSWORD contains bad characters"),
            Err(NotPresent) => {
                // Try another method
            }
        }

        // If we're on a TTY, query the user for a password
        if tty::stdin_isatty() {
            return Ok(String::from_utf8(tty::read_password("Password: ")?)?);
        }

        bail!("no password input mechanism available");
    }

    fn build_client() -> Client<hyper_openssl::HttpsConnector<hyper::client::HttpConnector>> {
        let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
        ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE); // fixme!

        let mut httpc = hyper::client::HttpConnector::new(1);
        httpc.set_nodelay(true); // important for h2 download performance!
        httpc.set_recv_buffer_size(Some(1024*1024)); // important for h2 download performance!
        httpc.enforce_http(false); // we want https...

        let https = hyper_openssl::HttpsConnector::with_connector(httpc, ssl_connector_builder).unwrap();

        Client::builder()
            //.http2_initial_stream_window_size( (1 << 31) - 2)
            //.http2_initial_connection_window_size( (1 << 31) - 2)
            .build::<_, Body>(https)
    }

    pub fn request(&self, mut req: Request<Body>) -> impl Future<Item=Value, Error=Error> {
        let login = self.auth.listen();

        let client = self.client.clone();

        // Wait for the (shared) login future, then attach the ticket
        // cookie and the CSRF prevention token to the request.
        login.and_then(move |auth| {
            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());

            Self::api_request(client, req)
        })
    }

    pub fn get(&self, path: &str, data: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder(&self.server, "GET", path, data).unwrap();
        self.request(req)
    }

    pub fn delete(&mut self, path: &str, data: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder(&self.server, "DELETE", path, data).unwrap();
        self.request(req)
    }

    pub fn post(&mut self, path: &str, data: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder(&self.server, "POST", path, data).unwrap();
        self.request(req)
    }

    pub fn download<W: Write>(&mut self, path: &str, output: W) -> impl Future<Item=W, Error=Error> {
        let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();

        let login = self.auth.listen();

        let client = self.client.clone();

        login.and_then(move |auth| {
            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());

            client.request(req)
                .map_err(Error::from)
                .and_then(|resp| {
                    let status = resp.status();
                    if !status.is_success() {
                        future::Either::A(
                            HttpClient::api_response(resp)
                                .and_then(|_| { bail!("unknown error"); })
                        )
                    } else {
                        // stream the response body chunk by chunk into the writer
                        future::Either::B(
                            resp.into_body()
                                .map_err(Error::from)
                                .fold(output, move |mut acc, chunk| {
                                    acc.write_all(&chunk)?;
                                    Ok::<_, Error>(acc)
                                })
                        )
                    }
                })
        })
    }

    pub fn upload(
        &mut self,
        content_type: &str,
        body: Body,
        path: &str,
        data: Option<Value>,
    ) -> impl Future<Item=Value, Error=Error> {

        let path = path.trim_matches('/');
        let mut url = format!("https://{}:8007/{}", &self.server, path);

        if let Some(data) = data {
            let query = tools::json_object_to_query(data).unwrap();
            url.push('?');
            url.push_str(&query);
        }

        let url: Uri = url.parse().unwrap();

        let req = Request::builder()
            .method("POST")
            .uri(url)
            .header("User-Agent", "proxmox-backup-client/1.0")
            .header("Content-Type", content_type)
            .body(body).unwrap();

        self.request(req)
    }
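
    /// Open a new "backup" (writer) session on the server; resolves to a
    /// `BackupClient` once the connection has been upgraded to HTTP/2.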
    pub fn start_backup(
        &self,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
        debug: bool,
    ) -> impl Future<Item=Arc<BackupClient>, Error=Error> {

        let param = json!({
            "backup-type": backup_type,
            "backup-id": backup_id,
            "backup-time": backup_time.timestamp(),
            "store": datastore,
            "debug": debug
        });

        let req = Self::request_builder(&self.server, "GET", "/api2/json/backup", Some(param)).unwrap();

        self.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .map(|(h2, canceller)| BackupClient::new(h2, canceller))
    }
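
    /// Open a "reader" session to download data from an existing backup.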
    pub fn start_backup_reader(
        &self,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
        debug: bool,
    ) -> impl Future<Item=Arc<BackupReader>, Error=Error> {

        let param = json!({
            "backup-type": backup_type,
            "backup-id": backup_id,
            "backup-time": backup_time.timestamp(),
            "store": datastore,
            "debug": debug,
        });
        let req = Self::request_builder(&self.server, "GET", "/api2/json/reader", Some(param)).unwrap();

        self.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
            .map(|(h2, canceller)| BackupReader::new(h2, canceller))
    }
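
    /// Send `req` with an HTTP `Upgrade` header and, on `101 Switching
    /// Protocols`, run the h2 handshake over the upgraded connection.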
    pub fn start_h2_connection(
        &self,
        mut req: Request<Body>,
        protocol_name: String,
    ) -> impl Future<Item=(H2Client, Canceller), Error=Error> {

        let login = self.auth.listen();
        let client = self.client.clone();

        login.and_then(move |auth| {
            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
            req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());

            client.request(req)
                .map_err(Error::from)
                .and_then(|resp| {
                    let status = resp.status();
                    if status != http::StatusCode::SWITCHING_PROTOCOLS {
                        future::Either::A(Self::api_response(resp).and_then(|_| { bail!("unknown error"); }))
                    } else {
                        future::Either::B(resp.into_body().on_upgrade().map_err(Error::from))
                    }
                })
                .and_then(|upgraded| {
                    let max_window_size = (1 << 31) - 2;

                    h2::client::Builder::new()
                        .initial_connection_window_size(max_window_size)
                        .initial_window_size(max_window_size)
                        .max_frame_size(4*1024*1024)
                        .handshake(upgraded)
                        .map_err(Error::from)
                })
                .and_then(|(h2, connection)| {
                    let connection = connection
                        .map_err(|_| panic!("HTTP/2.0 connection failed"));

                    let (connection, canceller) = cancellable(connection)?;
                    // A cancellable future resolves to None when cancelled and
                    // Some when it finishes; since we don't care about the
                    // return value, we map it away:
                    let connection = connection.map(|_| ());

                    // Spawn a new task to drive the connection state
                    hyper::rt::spawn(connection);

                    // Wait until the `SendRequest` handle has available capacity.
                    Ok(h2.ready()
                        .map(move |c| (H2Client::new(c), canceller))
                        .map_err(Error::from))
                })
                .flatten()
        })
    }
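
    /// Perform the actual login request; on success the ticket is cached
    /// on disk for reuse by later invocations.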
    fn credentials(
        client: Client<hyper_openssl::HttpsConnector<hyper::client::HttpConnector>>,
        server: String,
        username: String,
        password: String,
    ) -> Box<dyn Future<Item=AuthInfo, Error=Error> + Send> {

        let server2 = server.clone();

        let create_request = futures::future::lazy(move || {
            let data = json!({ "username": username, "password": password });
            let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
            Self::api_request(client, req)
        });

        let login_future = create_request
            .and_then(move |cred| {
                let auth = AuthInfo {
                    username: cred["data"]["username"].as_str().unwrap().to_owned(),
                    ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
                    token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
                };

                let _ = store_ticket_info(&server2, &auth.username, &auth.ticket, &auth.token);

                Ok(auth)
            });

        Box::new(login_future)
    }

    fn api_response(response: Response<Body>) -> impl Future<Item=Value, Error=Error> {
        let status = response.status();

        response
            .into_body()
            .concat2()
            .map_err(Error::from)
            .and_then(move |data| {
                let text = String::from_utf8(data.to_vec()).unwrap();
                if status.is_success() {
                    if !text.is_empty() {
                        let value: Value = serde_json::from_str(&text)?;
                        Ok(value)
                    } else {
                        Ok(Value::Null)
                    }
                } else {
                    bail!("HTTP Error {}: {}", status, text);
                }
            })
    }

    fn api_request(
        client: Client<hyper_openssl::HttpsConnector<hyper::client::HttpConnector>>,
        req: Request<Body>
    ) -> impl Future<Item=Value, Error=Error> {

        client.request(req)
            .map_err(Error::from)
            .and_then(Self::api_response)
    }

    pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> {
        let path = path.trim_matches('/');
        let url: Uri = format!("https://{}:8007/{}", server, path).parse()?;

        if let Some(data) = data {
            if method == "POST" {
                let request = Request::builder()
                    .method(method)
                    .uri(url)
                    .header("User-Agent", "proxmox-backup-client/1.0")
                    .header(hyper::header::CONTENT_TYPE, "application/json")
                    .body(Body::from(data.to_string()))?;
                return Ok(request);
            } else {
                // non-POST requests pass parameters via the query string
                let query = tools::json_object_to_query(data)?;
                let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?;
                let request = Request::builder()
                    .method(method)
                    .uri(url)
                    .header("User-Agent", "proxmox-backup-client/1.0")
                    .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
                    .body(Body::empty())?;
                return Ok(request);
            }
        }

        let request = Request::builder()
            .method(method)
            .uri(url)
            .header("User-Agent", "proxmox-backup-client/1.0")
            .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
            .body(Body::empty())?;

        Ok(request)
    }
}
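
/// Connection to a server-side "reader" session, used to download the
/// contents of an existing backup.
///
/// Dropping the reader cancels the underlying HTTP/2 connection.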
pub struct BackupReader {
    h2: H2Client,
    canceller: Canceller,
}

impl Drop for BackupReader {

    fn drop(&mut self) {
        self.canceller.cancel();
    }
}

impl BackupReader {

    pub fn new(h2: H2Client, canceller: Canceller) -> Arc<Self> {
        Arc::new(Self { h2, canceller })
    }

    pub fn get(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.get(path, param)
    }

    pub fn put(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.put(path, param)
    }

    pub fn post(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.post(path, param)
    }

    pub fn download<W: Write>(
        &self,
        file_name: &str,
        output: W,
    ) -> impl Future<Item=W, Error=Error> {
        let path = "download";
        let param = json!({ "file-name": file_name });
        self.h2.download(path, Some(param), output)
    }

    pub fn speedtest<W: Write>(
        &self,
        output: W,
    ) -> impl Future<Item=W, Error=Error> {
        self.h2.download("speedtest", None, output)
    }

    pub fn download_chunk<W: Write>(
        &self,
        digest: &[u8; 32],
        output: W,
    ) -> impl Future<Item=W, Error=Error> {
        let path = "chunk";
        let param = json!({ "digest": digest_to_hex(digest) });
        self.h2.download(path, Some(param), output)
    }

    pub fn force_close(self) {
        self.canceller.cancel();
    }
}
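
/// Connection to a server-side "backup" (writer) session, used to create
/// a new backup snapshot.
///
/// Dropping the client cancels the underlying HTTP/2 connection.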
pub struct BackupClient {
    h2: H2Client,
    canceller: Canceller,
}

impl Drop for BackupClient {

    fn drop(&mut self) {
        self.canceller.cancel();
    }
}

/// Size and checksum of an uploaded backup file
pub struct BackupStats {
    pub size: u64,
    pub csum: [u8; 32],
}

impl BackupClient {

    pub fn new(h2: H2Client, canceller: Canceller) -> Arc<Self> {
        Arc::new(Self { h2, canceller })
    }

    pub fn get(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.get(path, param)
    }

    pub fn put(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.put(path, param)
    }

    pub fn post(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        self.h2.post(path, param)
    }

    pub fn finish(self: Arc<Self>) -> impl Future<Item=(), Error=Error> {
        self.h2.clone()
            .post("finish", None)
            .map(move |_| {
                self.canceller.cancel();
            })
    }

    pub fn force_close(self) {
        self.canceller.cancel();
    }
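
    /// Encode `data` as a `DataBlob` (optionally compressed and either
    /// encrypted or merely signed) and upload it under `file_name`.
    ///
    /// A minimal usage sketch (the file name and flags are placeholders;
    /// `.wait()` assumes the caller blocks on the futures 0.1 future):
    /// ```ignore
    /// let stats = client
    ///     .upload_blob_from_data(data, "catalog.blob", None, true, false)
    ///     .wait()?;
    /// println!("uploaded {} bytes (sha256: {:?})", stats.size, stats.csum);
    /// ```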
    pub fn upload_blob_from_data(
        &self,
        data: Vec<u8>,
        file_name: &str,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        sign_only: bool,
    ) -> impl Future<Item=BackupStats, Error=Error> {

        let h2 = self.h2.clone();
        let file_name = file_name.to_owned();
        let size = data.len() as u64;

        futures::future::ok(())
            .and_then(move |_| {
                let blob = if let Some(ref crypt_config) = crypt_config {
                    if sign_only {
                        DataBlob::create_signed(&data, crypt_config, compress)?
                    } else {
                        DataBlob::encode(&data, Some(crypt_config), compress)?
                    }
                } else {
                    DataBlob::encode(&data, None, compress)?
                };

                let raw_data = blob.into_inner();
                Ok(raw_data)
            })
            .and_then(move |raw_data| {
                let csum = openssl::sha::sha256(&raw_data);
                let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
                h2.upload("blob", Some(param), raw_data)
                    .map(move |_| {
                        BackupStats { size, csum }
                    })
            })
    }

    pub fn upload_blob_from_file<P: AsRef<std::path::Path>>(
        &self,
        src_path: P,
        file_name: &str,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
    ) -> impl Future<Item=BackupStats, Error=Error> {

        let h2 = self.h2.clone();
        let file_name = file_name.to_owned();
        let src_path = src_path.as_ref().to_owned();

        tokio::fs::File::open(src_path.clone())
            .map_err(move |err| format_err!("unable to open file {:?} - {}", src_path, err))
            .and_then(move |file| {
                let contents = vec![];
                tokio::io::read_to_end(file, contents)
                    .map_err(Error::from)
                    .and_then(move |(_, contents)| {
                        let blob = if let Some(ref crypt_config) = crypt_config {
                            DataBlob::encode(&contents, Some(crypt_config), compress)?
                        } else {
                            DataBlob::encode(&contents, None, compress)?
                        };
                        let raw_data = blob.into_inner();
                        Ok((raw_data, contents.len()))
                    })
                    .and_then(move |(raw_data, size)| {
                        let csum = openssl::sha::sha256(&raw_data);
                        let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
                        h2.upload("blob", Some(param), raw_data)
                            .map(move |_| {
                                BackupStats { size: size as u64, csum }
                            })
                    })
            })
    }
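
    /// Upload a chunked archive stream ("fixed" or "dynamic" index,
    /// selected via `prefix`): download the list of chunks the server
    /// already knows, open an index writer, upload only new chunks and
    /// finally close the index. The returned `BackupStats` carries the
    /// raw stream size (the checksum is not computed here).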
    pub fn upload_stream(
        &self,
        archive_name: &str,
        stream: impl Stream<Item=bytes::BytesMut, Error=Error>,
        prefix: &str,
        fixed_size: Option<u64>,
        crypt_config: Option<Arc<CryptConfig>>,
    ) -> impl Future<Item=BackupStats, Error=Error> {

        let known_chunks = Arc::new(Mutex::new(HashSet::new()));

        let h2 = self.h2.clone();
        let h2_2 = self.h2.clone();
        let h2_3 = self.h2.clone();
        let h2_4 = self.h2.clone();

        let mut param = json!({ "archive-name": archive_name });
        if let Some(size) = fixed_size {
            param["size"] = size.into();
        }

        let index_path = format!("{}_index", prefix);
        let close_path = format!("{}_close", prefix);

        let prefix = prefix.to_owned();

        Self::download_chunk_list(h2, &index_path, archive_name, known_chunks.clone())
            .and_then(move |_| {
                h2_2.post(&index_path, Some(param))
            })
            .and_then(move |res| {
                let wid = res.as_u64().unwrap();
                Self::upload_chunk_info_stream(h2_3, wid, stream, &prefix, known_chunks.clone(), crypt_config)
                    .and_then(move |(chunk_count, size, _speed)| {
                        let param = json!({
                            "wid": wid ,
                            "chunk-count": chunk_count,
                            "size": size,
                        });
                        h2_4.post(&close_path, Some(param))
                            .map(move |_| {
                                BackupStats { size: size as u64, csum: [0u8; 32] }
                            })
                    })
            })
    }
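
    /// Queue for pipelined requests: a background task awaits each queued
    /// response future and reports the overall result through the
    /// returned oneshot receiver.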
    fn response_queue() -> (
        mpsc::Sender<h2::client::ResponseFuture>,
        sync::oneshot::Receiver<Result<(), Error>>
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
        let (verify_result_tx, verify_result_rx) = sync::oneshot::channel();

        hyper::rt::spawn(
            verify_queue_rx
                .map_err(Error::from)
                .for_each(|response: h2::client::ResponseFuture| {
                    response
                        .map_err(Error::from)
                        .and_then(H2Client::h2api_response)
                        .and_then(|result| {
                            println!("RESPONSE: {:?}", result);
                            Ok(())
                        })
                        .map_err(|err| format_err!("pipelined request failed: {}", err))
                })
                .then(|result|
                    verify_result_tx.send(result)
                )
                .map_err(|_| { /* ignore closed channel */ })
        );

        (verify_queue_tx, verify_result_rx)
    }
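
    /// Background queue that waits for pipelined chunk-upload responses,
    /// merges consecutive known chunks and appends them to the index with
    /// batched `PUT` requests.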
    fn append_chunk_queue(h2: H2Client, wid: u64, path: String) -> (
        mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>,
        sync::oneshot::Receiver<Result<(), Error>>
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
        let (verify_result_tx, verify_result_rx) = sync::oneshot::channel();

        let h2_2 = h2.clone();

        hyper::rt::spawn(
            verify_queue_rx
                .map_err(Error::from)
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
                    match (response, merged_chunk_info) {
                        (Some(response), MergedChunkInfo::Known(list)) => {
                            future::Either::A(
                                response
                                    .map_err(Error::from)
                                    .and_then(H2Client::h2api_response)
                                    .and_then(move |_result| {
                                        Ok(MergedChunkInfo::Known(list))
                                    })
                            )
                        }
                        (None, MergedChunkInfo::Known(list)) => {
                            future::Either::B(future::ok(MergedChunkInfo::Known(list)))
                        }
                        _ => unreachable!(),
                    }
                })
                .merge_known_chunks()
                .and_then(move |merged_chunk_info| {
                    match merged_chunk_info {
                        MergedChunkInfo::Known(chunk_list) => {
                            let mut digest_list = vec![];
                            let mut offset_list = vec![];
                            for (offset, digest) in chunk_list {
                                //println!("append chunk {} (offset {})", proxmox::tools::digest_to_hex(&digest), offset);
                                digest_list.push(digest_to_hex(&digest));
                                offset_list.push(offset);
                            }
                            println!("append chunks list len ({})", digest_list.len());
                            let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
                            let mut request = H2Client::request_builder("localhost", "PUT", &path, None).unwrap();
                            request.headers_mut().insert(hyper::header::CONTENT_TYPE, HeaderValue::from_static("application/json"));
                            let param_data = bytes::Bytes::from(param.to_string().as_bytes());
                            let upload_data = Some(param_data);
                            h2_2.send_request(request, upload_data)
                                .and_then(move |response| {
                                    response
                                        .map_err(Error::from)
                                        .and_then(H2Client::h2api_response)
                                        .and_then(|_| Ok(()))
                                })
                                .map_err(|err| format_err!("pipelined request failed: {}", err))
                        }
                        _ => unreachable!(),
                    }
                })
                .for_each(|_| Ok(()))
                .then(|result|
                    verify_result_tx.send(result)
                )
                .map_err(|_| { /* ignore closed channel */ })
        );

        (verify_queue_tx, verify_result_rx)
    }

    fn download_chunk_list(
        h2: H2Client,
        path: &str,
        archive_name: &str,
        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    ) -> impl Future<Item=(), Error=Error> {

        let param = json!({ "archive-name": archive_name });
        let request = H2Client::request_builder("localhost", "GET", path, Some(param)).unwrap();

        h2.send_request(request, None)
            .and_then(move |response| {
                response
                    .map_err(Error::from)
                    .and_then(move |resp| {
                        let status = resp.status();

                        if !status.is_success() {
                            future::Either::A(H2Client::h2api_response(resp).and_then(|_| { bail!("unknown error"); }))
                        } else {
                            future::Either::B(future::ok(resp.into_body()))
                        }
                    })
                    .and_then(move |mut body| {
                        let mut release_capacity = body.release_capacity().clone();

                        DigestListDecoder::new(body.map_err(Error::from))
                            .for_each(move |chunk| {
                                let _ = release_capacity.release_capacity(chunk.len());
                                println!("GOT DOWNLOAD {}", digest_to_hex(&chunk));
                                known_chunks.lock().unwrap().insert(chunk);
                                Ok(())
                            })
                    })
            })
    }
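
    /// Chunk the input stream, skip chunks the server already knows,
    /// upload the rest and queue every chunk for appending to the index;
    /// resolves to `(chunk_count, stream_len, speed_mbs)`.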
    fn upload_chunk_info_stream(
        h2: H2Client,
        wid: u64,
        stream: impl Stream<Item=bytes::BytesMut, Error=Error>,
        prefix: &str,
        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
        crypt_config: Option<Arc<CryptConfig>>,
    ) -> impl Future<Item=(usize, usize, usize), Error=Error> {

        let repeat = std::sync::Arc::new(AtomicUsize::new(0));
        let repeat2 = repeat.clone();

        let stream_len = std::sync::Arc::new(AtomicUsize::new(0));
        let stream_len2 = stream_len.clone();

        let append_chunk_path = format!("{}_index", prefix);
        let upload_chunk_path = format!("{}_chunk", prefix);

        let (upload_queue, upload_result) = Self::append_chunk_queue(h2.clone(), wid, append_chunk_path.to_owned());

        let start_time = std::time::Instant::now();

        stream
            .and_then(move |data| {

                let chunk_len = data.len();

                repeat.fetch_add(1, Ordering::SeqCst);
                let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

                let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
                    .compress(true);

                if let Some(ref crypt_config) = crypt_config {
                    chunk_builder = chunk_builder.crypt_config(crypt_config);
                }

                let mut known_chunks = known_chunks.lock().unwrap();
                let digest = chunk_builder.digest();
                let chunk_is_known = known_chunks.contains(digest);
                if chunk_is_known {
                    Ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
                } else {
                    known_chunks.insert(*digest);
                    let chunk = chunk_builder.build()?;
                    Ok(MergedChunkInfo::New(ChunkInfo { chunk, chunk_len: chunk_len as u64, offset }))
                }
            })
            .merge_known_chunks()
            .for_each(move |merged_chunk_info| {

                if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
                    let offset = chunk_info.offset;
                    let digest = *chunk_info.chunk.digest();
                    let digest_str = digest_to_hex(&digest);
                    let upload_queue = upload_queue.clone();

                    println!("upload new chunk {} ({} bytes, offset {})", digest_str,
                        chunk_info.chunk_len, offset);

                    let chunk_data = chunk_info.chunk.raw_data();
                    let param = json!({
                        "wid": wid,
                        "digest": digest_str,
                        "size": chunk_info.chunk_len,
                        "encoded-size": chunk_data.len(),
                    });

                    let request = H2Client::request_builder("localhost", "POST", &upload_chunk_path, Some(param)).unwrap();
                    let upload_data = Some(bytes::Bytes::from(chunk_data));

                    let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);

                    future::Either::A(
                        h2.send_request(request, upload_data)
                            .and_then(move |response| {
                                upload_queue.clone().send((new_info, Some(response)))
                                    .map(|_| ()).map_err(Error::from)
                            })
                    )
                } else {
                    future::Either::B(
                        upload_queue.clone().send((merged_chunk_info, None))
                            .map(|_| ()).map_err(Error::from)
                    )
                }
            })
            .then(move |result| {
                //println!("RESULT {:?}", result);
                upload_result.map_err(Error::from).and_then(|upload1_result| {
                    Ok(upload1_result.and(result))
                })
            })
            .flatten()
            .and_then(move |_| {
                let repeat = repeat2.load(Ordering::SeqCst);
                let stream_len = stream_len2.load(Ordering::SeqCst);
                // bytes per microsecond, scaled to MB/s: (bytes * 10^6 / 2^20) / elapsed_µs
                let speed = ((stream_len*1000000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
                println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
                if repeat > 0 {
                    println!("Average chunk size was {} bytes.", stream_len/repeat);
                    println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
                }
                Ok((repeat, stream_len, speed))
            })
    }

    pub fn upload_speedtest(&self) -> impl Future<Item=usize, Error=Error> {

        let mut data = vec![];
        // generate pseudo random byte sequence
        for i in 0..1024*1024 {
            for j in 0..4 {
                let byte = ((i >> (j<<3))&0xff) as u8;
                data.push(byte);
            }
        }

        let item_len = data.len();

        let repeat = std::sync::Arc::new(AtomicUsize::new(0));
        let repeat2 = repeat.clone();

        let (upload_queue, upload_result) = Self::response_queue();

        let start_time = std::time::Instant::now();

        let h2 = self.h2.clone();

        futures::stream::repeat(data)
            .take_while(move |_| {
                repeat.fetch_add(1, Ordering::SeqCst);
                Ok(start_time.elapsed().as_secs() < 5)
            })
            .for_each(move |data| {
                let h2 = h2.clone();

                let upload_queue = upload_queue.clone();

                println!("send test data ({} bytes)", data.len());
                let request = H2Client::request_builder("localhost", "POST", "speedtest", None).unwrap();
                h2.send_request(request, Some(bytes::Bytes::from(data)))
                    .and_then(move |response| {
                        upload_queue.send(response)
                            .map(|_| ()).map_err(Error::from)
                    })
            })
            .then(move |result| {
                println!("RESULT {:?}", result);
                upload_result.map_err(Error::from).and_then(|upload1_result| {
                    Ok(upload1_result.and(result))
                })
            })
            .flatten()
            .and_then(move |_| {
                let repeat = repeat2.load(Ordering::SeqCst);
                println!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
                let speed = ((item_len*1000000*(repeat as usize))/(1024*1024))/(start_time.elapsed().as_micros() as usize);
                if repeat > 0 {
                    println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
                }
                Ok(speed)
            })
    }
}
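
/// Lightweight, cloneable wrapper around an established HTTP/2
/// `SendRequest` handle, shared by `BackupClient` and `BackupReader`.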
#[derive(Clone)]
pub struct H2Client {
    h2: h2::client::SendRequest<bytes::Bytes>,
}

impl H2Client {

    pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
        Self { h2 }
    }

    pub fn get(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder("localhost", "GET", path, param).unwrap();
        self.request(req)
    }

    pub fn put(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder("localhost", "PUT", path, param).unwrap();
        self.request(req)
    }

    pub fn post(&self, path: &str, param: Option<Value>) -> impl Future<Item=Value, Error=Error> {
        let req = Self::request_builder("localhost", "POST", path, param).unwrap();
        self.request(req)
    }

    pub fn download<W: Write>(&self, path: &str, param: Option<Value>, output: W) -> impl Future<Item=W, Error=Error> {
        let request = Self::request_builder("localhost", "GET", path, param).unwrap();

        self.send_request(request, None)
            .and_then(move |response| {
                response
                    .map_err(Error::from)
                    .and_then(move |resp| {
                        let status = resp.status();
                        if !status.is_success() {
                            future::Either::A(
                                H2Client::h2api_response(resp)
                                    .and_then(|_| { bail!("unknown error"); })
                            )
                        } else {
                            let mut body = resp.into_body();
                            let mut release_capacity = body.release_capacity().clone();

                            future::Either::B(
                                body
                                    .map_err(Error::from)
                                    .fold(output, move |mut acc, chunk| {
                                        let _ = release_capacity.release_capacity(chunk.len());
                                        acc.write_all(&chunk)?;
                                        Ok::<_, Error>(acc)
                                    })
                            )
                        }
                    })
            })
    }

    pub fn upload(&self, path: &str, param: Option<Value>, data: Vec<u8>) -> impl Future<Item=Value, Error=Error> {
        let request = Self::request_builder("localhost", "POST", path, param).unwrap();

        self.h2.clone()
            .ready()
            .map_err(Error::from)
            .and_then(move |mut send_request| {
                let (response, stream) = send_request.send_request(request, false).unwrap();
                PipeToSendStream::new(bytes::Bytes::from(data), stream)
                    .and_then(|_| {
                        response
                            .map_err(Error::from)
                            .and_then(Self::h2api_response)
                    })
            })
    }

    fn request(
        &self,
        request: Request<()>,
    ) -> impl Future<Item=Value, Error=Error> {

        self.send_request(request, None)
            .and_then(move |response| {
                response
                    .map_err(Error::from)
                    .and_then(Self::h2api_response)
            })
    }

    fn send_request(
        &self,
        request: Request<()>,
        data: Option<bytes::Bytes>,
    ) -> impl Future<Item=h2::client::ResponseFuture, Error=Error> {

        self.h2.clone()
            .ready()
            .map_err(Error::from)
            .and_then(move |mut send_request| {
                if let Some(data) = data {
                    let (response, stream) = send_request.send_request(request, false).unwrap();
                    future::Either::A(PipeToSendStream::new(data, stream)
                        .and_then(move |_| {
                            future::ok(response)
                        }))
                } else {
                    let (response, _stream) = send_request.send_request(request, true).unwrap();
                    future::Either::B(future::ok(response))
                }
            })
    }

    fn h2api_response(response: Response<h2::RecvStream>) -> impl Future<Item=Value, Error=Error> {
        let status = response.status();

        let (_head, mut body) = response.into_parts();

        // The `release_capacity` handle allows the caller to manage
        // flow control.
        //
        // Whenever data is received, the caller is responsible for
        // releasing capacity back to the server once it has freed
        // the data from memory.
        let mut release_capacity = body.release_capacity().clone();

        body
            .map(move |chunk| {
                // Let the server send more data.
                let _ = release_capacity.release_capacity(chunk.len());
                chunk
            })
            .concat2()
            .map_err(Error::from)
            .and_then(move |data| {
                let text = String::from_utf8(data.to_vec()).unwrap();
                if status.is_success() {
                    if !text.is_empty() {
                        let mut value: Value = serde_json::from_str(&text)?;
                        if let Some(map) = value.as_object_mut() {
                            if let Some(data) = map.remove("data") {
                                return Ok(data);
                            }
                        }
                        bail!("got result without data property");
                    } else {
                        Ok(Value::Null)
                    }
                } else {
                    bail!("HTTP Error {}: {}", status, text);
                }
            })
    }

    // Note: we always encode parameters in the URL
    pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<()>, Error> {
        let path = path.trim_matches('/');

        if let Some(data) = data {
            let query = tools::json_object_to_query(data)?;
            // We detected problems with hyper around 6,000 characters, so we
            // try to stay on the safe side.
            if query.len() > 4096 { bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len()); }
            let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?;
            let request = Request::builder()
                .method(method)
                .uri(url)
                .header("User-Agent", "proxmox-backup-client/1.0")
                .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
                .body(())?;
            Ok(request)
        } else {
            let url: Uri = format!("https://{}:8007/{}", server, path).parse()?;
            let request = Request::builder()
                .method(method)
                .uri(url)
                .header("User-Agent", "proxmox-backup-client/1.0")
                .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded")
                .body(())?;

            Ok(request)
        }
    }
}