src/api2/backup.rs: new required backup-time parameter

The client should pass the backup time as a parameter.
Dietmar Maurer 2019-07-25 13:44:01 +02:00
parent e128d4e84f
commit ca5d0b61ca
4 changed files with 42 additions and 12 deletions
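In practice this means the client now has to send the snapshot time along with the other backup-upgrade parameters. A minimal sketch of the parameter object (values are placeholders; the real construction lives in HttpClient::start_backup, shown in the last hunk below):

```rust
use serde_json::json;

fn main() {
    // "backup-time" is a Unix epoch in seconds, e.g. 2019-07-25T11:44:01Z.
    let backup_time: i64 = 1_564_055_041;
    let param = json!({
        "backup-type": "host",
        "backup-id": "myhost",      // placeholder ID
        "backup-time": backup_time,
        "store": "store1",          // placeholder datastore
        "debug": false
    });
    println!("{}", param);
}
```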

View File

@@ -7,7 +7,6 @@ use futures::*;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::{Body, Response, StatusCode};
use hyper::http::request::Parts;
use chrono::{Local, TimeZone};
use serde_json::{json, Value};
@@ -38,6 +37,8 @@ pub fn api_method_upgrade_backup() -> ApiAsyncMethod {
.required("backup-type", StringSchema::new("Backup type.")
.format(Arc::new(ApiStringFormat::Enum(&["vm", "ct", "host"]))))
.required("backup-id", StringSchema::new("Backup ID."))
.required("backup-time", IntegerSchema::new("Backup time (Unix epoch.)")
.minimum(1547797308))
.optional("debug", BooleanSchema::new("Enable verbose debug logging."))
)
}
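The schema's minimum of 1547797308 looks like a sanity lower bound rather than a meaningful date. A quick sketch, assuming the chrono 0.4 API already used in this codebase, shows what it corresponds to:

```rust
use chrono::{TimeZone, Utc};

fn main() {
    // Lower bound of the "backup-time" schema above; this prints
    // 2019-01-18T07:41:48+00:00, so timestamps before early 2019 are rejected.
    println!("{}", Utc.timestamp(1547797308, 0).to_rfc3339());
}
```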
@@ -57,7 +58,7 @@ fn upgrade_to_backup_protocol(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = Local.timestamp(Local::now().timestamp(), 0);
let backup_time = tools::required_integer_param(&param, "backup-time")?;
let protocols = parts
.headers
@@ -80,7 +81,13 @@ fn upgrade_to_backup_protocol(
let backup_group = BackupGroup::new(backup_type, backup_id);
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
let backup_dir = BackupDir::new_with_group(backup_group, backup_time.timestamp());
let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
bail!("backup timestamp is older than last backup.");
}
}
let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
if !is_new { bail!("backup directory already exists."); }
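To make the new server-side validation explicit, here is a standalone sketch of the ordering check the handler now performs on the supplied time. The real code compares BackupDir/BackupInfo values; plain i64 epochs stand in for them here:

```rust
// A new snapshot's time must be strictly newer than the last backup
// in the same group, otherwise the handler bails out.
fn check_backup_time(backup_time: i64, last_backup_time: Option<i64>) -> Result<(), String> {
    if let Some(last) = last_backup_time {
        if backup_time <= last {
            return Err("backup timestamp is older than last backup.".into());
        }
    }
    Ok(())
}

fn main() {
    assert!(check_backup_time(1_564_055_041, Some(1_564_000_000)).is_ok());
    assert!(check_backup_time(1_564_000_000, Some(1_564_055_041)).is_err());
}
```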

View File

@@ -424,6 +424,8 @@ fn create_backup(
let verbose = param["verbose"].as_bool().unwrap_or(false);
let backup_time_opt = param["backup-time"].as_i64();
let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
if let Some(size) = chunk_size_opt {
@@ -434,6 +436,8 @@ fn create_backup(
let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());
let backup_type = "host";
let include_dev = param["include-dev"].as_array();
let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
@@ -507,14 +511,18 @@ fn create_backup(
}
}
let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0);
let client = HttpClient::new(repo.host(), repo.user())?;
record_repository(&repo);
println!("Starting backup");
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
println!("Client name: {}", tools::nodename());
println!("Start Time: {}", backup_time.to_rfc3339());
let start_time = Local::now();
println!("Starting protocol: {}", start_time.to_rfc3339());
let (crypt_config, rsa_encrypted_key) = match keyfile {
None => (None, None),
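On the client side, the optional "backup-time" parameter is folded into the existing timestamp handling: use it when given, otherwise fall back to the current time truncated to whole seconds. A small sketch of that resolution step, assuming chrono 0.4's Utc.timestamp as used in the diff (the helper name is made up for illustration):

```rust
use chrono::{DateTime, TimeZone, Utc};

// Resolve the snapshot time from an optional CLI parameter, mirroring
// Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0) above.
fn resolve_backup_time(backup_time_opt: Option<i64>) -> DateTime<Utc> {
    Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0)
}

fn main() {
    println!("explicit: {}", resolve_backup_time(Some(1_564_055_041)).to_rfc3339());
    println!("default:  {}", resolve_backup_time(None).to_rfc3339());
}
```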
@@ -535,7 +543,7 @@ fn create_backup(
}
};
let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;
let client = client.start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose).wait()?;
for (backup_type, filename, target, size) in upload_list {
match backup_type {
@@ -543,7 +551,7 @@ fn create_backup(
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
}
BackupType::LOGFILE => {
BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
}
@@ -592,8 +600,8 @@ fn create_backup(
client.finish().wait()?;
let end_time = Utc.timestamp(Utc::now().timestamp(), 0);
let elapsed = end_time.signed_duration_since(backup_time);
let end_time = Local.timestamp(Local::now().timestamp(), 0);
let elapsed = end_time.signed_duration_since(start_time);
println!("Duration: {}", elapsed);
println!("End Time: {}", end_time.to_rfc3339());
@@ -1321,6 +1329,11 @@ fn main() {
.optional(
"host-id",
StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
.optional(
"backup-time",
IntegerSchema::new("Backup time (Unix epoch.)")
.minimum(1547797308)
)
.optional(
"chunk-size",
IntegerSchema::new("Chunk size in KB. Must be a power of 2.")

View File

@@ -12,7 +12,9 @@ fn upload_speed() -> Result<usize, Error> {
let client = HttpClient::new(host, username)?;
let client = client.start_backup(datastore, "host", "speedtest", false).wait()?;
let backup_time = chrono::Utc::now();
let client = client.start_backup(datastore, "host", "speedtest", backup_time, false).wait()?;
println!("start upload speed test");
let res = client.upload_speedtest().wait()?;

View File

@@ -282,10 +282,18 @@ impl HttpClient {
datastore: &str,
backup_type: &str,
backup_id: &str,
backup_time: DateTime<Utc>,
debug: bool,
) -> impl Future<Item=Arc<BackupClient>, Error=Error> {
let param = json!({"backup-type": backup_type, "backup-id": backup_id, "store": datastore, "debug": debug});
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"backup-time": backup_time.timestamp(),
"store": datastore,
"debug": debug
});
let req = Self::request_builder(&self.server, "GET", "/api2/json/backup", Some(param)).unwrap();
self.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
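The signature change means every caller of start_backup now has to supply a DateTime<Utc>, as the upload-speed hunk above illustrates. A self-contained sketch of the new call shape, with a stub standing in for the real HttpClient (which is not reproduced here):

```rust
use chrono::{DateTime, Utc};

// Stub; only the shape of the updated start_backup call matters.
struct Client;

impl Client {
    fn start_backup(
        &self,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
        debug: bool,
    ) {
        println!(
            "GET /api2/json/backup store={} type={} id={} time={} debug={}",
            datastore, backup_type, backup_id, backup_time.timestamp(), debug
        );
    }
}

fn main() {
    // Mirrors the upload-speed change: the caller now passes an explicit time.
    let backup_time = Utc::now();
    Client.start_backup("store1", "host", "speedtest", backup_time, false);
}
```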