src/api2/admin/datastore.rs: remove test_prune, add dry-run to normal prune

And use display_task_log on the client side.
Dietmar Maurer 2019-12-08 10:59:47 +01:00
parent ec137a99c6
commit 3b03abfe14
2 changed files with 35 additions and 95 deletions
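The first diff below is the server-side change in src/api2/admin/datastore.rs (the file named in the commit title); the second is the prune command on the client side. For orientation, the new dry-run switch is just one more optional boolean in the parameter object of the existing prune call. A minimal sketch of such a parameter object follows; only the parameter names are taken from the schema changed below, while the values and the helper function are made up:

use serde_json::json;

// Hypothetical parameter object for the prune API call after this commit.
// Only the parameter names come from the schema changed below.
fn example_prune_params() -> serde_json::Value {
    json!({
        "backup-type": "host",        // backup group type
        "backup-id": "example-host",  // backup group id
        "keep-last": 3,               // one of the common prune options
        "dry-run": true               // new: report only, delete nothing
    })
}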


@@ -287,65 +287,6 @@ macro_rules! add_common_prune_prameters {
}
}
const API_METHOD_TEST_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&test_prune),
&ObjectSchema::new(
"Test what prune would do.",
&add_common_prune_prameters!([
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
],[
("store", false, &StringSchema::new("Datastore name.").schema()),
])
)
);
fn test_prune(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let store = param["store"].as_str().unwrap();
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let group = BackupGroup::new(backup_type, backup_id);
let datastore = DataStore::lookup_datastore(store)?;
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
keep_hourly: param["keep-hourly"].as_u64(),
keep_daily: param["keep-daily"].as_u64(),
keep_weekly: param["keep-weekly"].as_u64(),
keep_monthly: param["keep-monthly"].as_u64(),
keep_yearly: param["keep-yearly"].as_u64(),
};
let list = group.list_backups(&datastore.base_path())?;
let result: Vec<(Value)> = if !prune_options.keeps_something() {
list.iter().map(|info| {
json!({
"backup-time": info.backup_dir.backup_time().timestamp(),
"keep": true,
})
}).collect()
} else {
let prune_info = compute_prune_info(list, &prune_options)?;
prune_info.iter().map(|(info, keep)| {
json!({
"backup-time": info.backup_dir.backup_time().timestamp(),
"keep": keep,
})
}).collect()
};
Ok(json!(result))
}
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&prune),
&ObjectSchema::new(
@@ -353,6 +294,10 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&add_common_prune_prameters!([
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("dry-run", true, &BooleanSchema::new(
"Just show what prune would do, but do not delete anything.")
.schema()
),
],[
("store", false, &StringSchema::new("Datastore name.").schema()),
])
@@ -370,6 +315,8 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
let group = BackupGroup::new(backup_type, backup_id);
let datastore = DataStore::lookup_datastore(store)?;
@@ -390,7 +337,11 @@ fn prune(
worker.log("No prune selection - keeping all files.");
return Ok(());
} else {
worker.log(format!("Starting prune on store {}", store));
if dry_run {
worker.log(format!("Testing prune on store {}", store));
} else {
worker.log(format!("Starting prune on store {}", store));
}
}
let list = group.list_backups(&datastore.base_path())?;
@@ -400,10 +351,21 @@ fn prune(
prune_info.reverse(); // delete older snapshots first
for (info, keep) in prune_info {
if keep {
worker.log(format!("keep {:?}", info.backup_dir.relative_path()));
} else {
worker.log(format!("remove {:?}", info.backup_dir.relative_path()));
let backup_time = info.backup_dir.backup_time();
let timestamp = BackupDir::backup_time_to_string(backup_time);
let group = info.backup_dir.group();
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
group.backup_id(),
timestamp,
if keep { "keep" } else { "remove" },
);
worker.log(msg);
if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?;
}
}
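The rewritten loop above logs one line per snapshot in the form "{backup-type}/{backup-id}/{timestamp} keep|remove" and deletes a snapshot only when it is neither kept by the prune options nor protected by dry-run. A standalone restatement with plain types, for illustration only (the real code uses the BackupDir/BackupGroup values from the hunk above; these function names are made up):

// Per-snapshot log line as formatted in the loop above.
fn prune_log_line(backup_type: &str, backup_id: &str, timestamp: &str, keep: bool) -> String {
    format!(
        "{}/{}/{} {}",
        backup_type,
        backup_id,
        timestamp,
        if keep { "keep" } else { "remove" },
    )
}

// Deletion condition from the loop above: remove a snapshot only when the
// prune options do not keep it and dry-run is not active.
fn should_delete(keep: bool, dry_run: bool) -> bool {
    !(dry_run || keep)
}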
@@ -672,7 +634,6 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
(
"prune",
&Router::new()
.get(&API_METHOD_TEST_PRUNE)
.post(&API_METHOD_PRUNE)
),
(


@@ -1188,47 +1188,26 @@ fn prune(
let group = BackupGroup::parse(group)?;
let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
param.as_object_mut().unwrap().remove("dry-run");
param.as_object_mut().unwrap().remove("output-format");
param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();
if dry_run {
let result = async_main(async { client.get(&path, Some(param)).await })?;
let data = &result["data"];
let result = async_main(async { client.post(&path, Some(param)).await })?;
if output_format == "text" {
for item in data.as_array().unwrap() {
let timestamp = item["backup-time"].as_i64().unwrap();
let timestamp = BackupDir::backup_time_to_string(Utc.timestamp(timestamp, 0));
let keep = item["keep"].as_bool().unwrap();
println!("{}/{}/{} {}",
group.backup_type(),
group.backup_id(),
timestamp,
if keep { "keep" } else { "remove" },
);
}
} else {
format_and_print_result(&data, &output_format);
record_repository(&repo);
let data = &result["data"];
if output_format == "text" {
if let Some(upid) = data.as_str() {
display_task_log(client, upid, true)?;
}
} else {
let result = async_main(async { client.post(&path, Some(param)).await })?;
let data = &result["data"];
if output_format == "text" {
if let Some(upid) = data.as_str() {
display_task_log(client, upid, true)?;
}
} else {
format_and_print_result(&data, &output_format);
}
format_and_print_result(&data, &output_format);
}
record_repository(&repo);
Ok(Value::Null)
}
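With the server now handling dry-run, the client-side prune command above always POSTs to the prune endpoint (forwarding dry-run as a normal parameter) and, for text output, follows the returned worker task instead of printing a result list itself. Reassembled from the hunk above, the resulting flow reads roughly like this (a condensed sketch; parameter setup and the surrounding function are omitted, and the helpers are the crate-internal ones used above):

// Condensed sketch of the client flow after this commit.
let result = async_main(async { client.post(&path, Some(param)).await })?;

record_repository(&repo);

let data = &result["data"];
if output_format == "text" {
    // The prune call now returns the worker task's UPID; follow its log.
    if let Some(upid) = data.as_str() {
        display_task_log(client, upid, true)?;
    }
} else {
    format_and_print_result(&data, &output_format);
}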