add benchmark flag to backup creation for proper cleanup when running a benchmark
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
commit 61d7b5013c
parent 871181d984
@@ -38,6 +38,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
             ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
+            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
         ]),
     )
 ).access(
@@ -56,6 +57,7 @@ fn upgrade_to_backup_protocol(

     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);
+        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

         let userid: Userid = rpcenv.get_user().unwrap().parse()?;

@@ -90,11 +92,24 @@ async move {

         let backup_group = BackupGroup::new(backup_type, backup_id);

+        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+            if !benchmark {
+                bail!("unable to run benchmark without the --benchmark flag");
+            }
+            "benchmark"
+        } else {
+            if benchmark {
+                bail!("the benchmark flag is only allowed on 'host/benchmark'");
+            }
+            "backup"
+        };
+
         // lock backup group to only allow one backup per group at a time
         let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;

         // permission check
-        if owner != userid { // only the owner is allowed to create additional snapshots
+        if owner != userid && worker_type != "benchmark" {
+            // only the owner is allowed to create additional snapshots
             bail!("backup owner check failed ({} != {})", userid, owner);
         }

@@ -116,14 +131,15 @@ async move {
         let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
         if !is_new { bail!("backup directory already exists."); }

-        WorkerTask::spawn("backup", Some(worker_id), userid.clone(), true, move |worker| {
+        WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
             let mut env = BackupEnvironment::new(
                 env_type, userid, worker.clone(), datastore, backup_dir);

             env.debug = debug;
             env.last_backup = last_backup;

-            env.log(format!("starting new backup on datastore '{}': {:?}", store, path));
+            env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

             let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

@@ -160,7 +176,11 @@ async move {
                 req = req_fut => req,
                 abrt = abort_future => abrt,
             };
+            if benchmark {
+                env.log("benchmark finished successfully");
+                env.remove_backup()?;
+                return Ok(());
+            }
             match (res, env.ensure_finished()) {
                 (Ok(_), Ok(())) => {
                     env.log("backup finished successfully");

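The net effect of the server-side hunks above: the benchmark flag is mandatory for the reserved 'host/benchmark' group, rejected everywhere else, and benchmark tasks run under their own worker type with their data removed on completion. A minimal self-contained sketch of that accept/reject matrix (a standalone function mirroring the diff, not the actual handler):

// Sketch of the gating rules introduced in upgrade_to_backup_protocol().
fn worker_type(backup_type: &str, backup_id: &str, benchmark: bool) -> Result<&'static str, String> {
    if backup_type == "host" && backup_id == "benchmark" {
        if !benchmark {
            // the reserved group may only be used by benchmark jobs
            return Err("unable to run benchmark without the --benchmark flag".into());
        }
        Ok("benchmark")
    } else {
        if benchmark {
            // benchmark data must never land in a regular backup group
            return Err("the benchmark flag is only allowed on 'host/benchmark'".into());
        }
        Ok("backup")
    }
}

fn main() {
    assert_eq!(worker_type("host", "benchmark", true), Ok("benchmark"));
    assert_eq!(worker_type("vm", "100", false), Ok("backup"));
    assert!(worker_type("vm", "100", true).is_err());          // flag rejected outside the reserved group
    assert!(worker_type("host", "benchmark", false).is_err()); // flag required inside it
}

Note that the owner check is also relaxed for benchmark workers, which is safe because a benchmark snapshot is deleted as soon as its task finishes.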
@@ -1026,6 +1026,7 @@ async fn create_backup(
         &backup_id,
         backup_time,
         verbose,
+        false
     ).await?;

     let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {

@@ -226,6 +226,7 @@ async fn test_upload_speed(
         "benchmark",
         backup_time,
         false,
+        true
     ).await?;

     if verbose { eprintln!("Start TLS speed test"); }

@@ -53,6 +53,7 @@ impl BackupWriter {
         backup_id: &str,
         backup_time: DateTime<Utc>,
         debug: bool,
+        benchmark: bool
     ) -> Result<Arc<BackupWriter>, Error> {

         let param = json!({
@@ -60,7 +61,8 @@ impl BackupWriter {
             "backup-id": backup_id,
             "backup-time": backup_time.timestamp(),
             "store": datastore,
-            "debug": debug
+            "debug": debug,
+            "benchmark": benchmark
         });

         let req = HttpClient::request_builder(
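On the wire, the client now advertises the flag as part of the protocol-upgrade parameters built in BackupWriter::start() above. A sketch of the resulting parameter object for a benchmark run; the datastore name and timestamp are made-up example values, and the "backup-type" key is assumed from the handler context rather than shown in this hunk:

use serde_json::json;

fn benchmark_upgrade_params() -> serde_json::Value {
    json!({
        "backup-type": "host",      // benchmark jobs must target the
        "backup-id": "benchmark",   // reserved host/benchmark group
        "backup-time": 1601456400,  // epoch seconds, example value
        "store": "store2",          // hypothetical datastore name
        "debug": false,
        "benchmark": true,          // new flag: snapshot is removed when the task ends
    })
}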