src/bin/proxmox-backup-client.rs: new display task log helper
New helper to display worker task logs. Use it for prune (experimental).
commit 163e9bbe91
parent 6b508dd563
@@ -383,6 +383,7 @@ fn prune(
         keep_yearly: param["keep-yearly"].as_u64(),
     };
 
+    // We use a WorkerTask just to have a task log, but run synchronously
     let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
 
     let result = try_block! {
         if !prune_options.keeps_something() {
@@ -416,7 +417,7 @@ fn prune(
         bail!("prune failed - {}", err);
     }
 
-    Ok(json!(null))
+    Ok(json!(worker.to_string())) // return the UPID
 }
 
 #[sortable]
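The first two hunks change the server side of prune: the job still runs synchronously, but it is wrapped in a WorkerTask so its output is captured in a pollable task log, and the handler now returns the task's UPID instead of null. A minimal sketch of that pattern with simplified stand-in types (this is not the real proxmox-backup WorkerTask API):

    // Stand-in for the worker-task idea: run a job synchronously while
    // recording its log lines under a task id (UPID) the caller can poll.
    struct Task {
        upid: String,
        lines: Vec<String>,
    }

    impl Task {
        fn new(worker_type: &str, user: &str) -> Self {
            Task { upid: format!("UPID:{}:{}", worker_type, user), lines: Vec::new() }
        }

        fn log(&mut self, msg: impl Into<String>) {
            self.lines.push(msg.into());
        }
    }

    fn main() {
        let mut worker = Task::new("prune", "root@pam");
        worker.log("starting prune");   // the work happens here, synchronously
        worker.log("prune finished");
        // the handler returns the UPID so the client can fetch the log later
        println!("{}", worker.upid);
    }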
@@ -1124,6 +1124,49 @@ fn upload_log(
     })
 }
 
+fn display_task_log(
+    client: HttpClient,
+    upid_str: &str,
+) -> Result<(), Error> {
+
+    println!("TESTLOG {}", upid_str);
+
+    let path = format!("api2/json/nodes/localhost/tasks/{}/log", upid_str);
+
+    let mut start = 1;
+    let limit = 500;
+
+    loop {
+        let param = json!({ "start": start, "limit": limit, "test-status": true });
+        let result = async_main(async { client.get(&path, Some(param)).await })?;
+
+        let active = result["active"].as_bool().unwrap();
+        let total = result["total"].as_u64().unwrap();
+        let data = result["data"].as_array().unwrap();
+
+        let lines = data.len();
+
+        for item in data {
+            let n = item["n"].as_u64().unwrap();
+            let t = item["t"].as_str().unwrap();
+            if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
+            start += 1;
+            println!("{}", t);
+        }
+
+        if start > total {
+            if active {
+                std::thread::sleep(std::time::Duration::from_millis(1000));
+            } else {
+                break;
+            }
+        } else {
+            if lines != limit { bail!("got wrong number of lines from server ({} != {})", lines, limit); }
+        }
+    }
+
+    Ok(())
+}
+
 fn prune(
     mut param: Value,
     _info: &ApiMethod,
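display_task_log pages through the log with start/limit, checks the server-reported line numbers, and keeps polling once per second while the task is still marked active. The same loop, as a self-contained sketch against a mocked log source (MockTask is a stand-in for the /nodes/localhost/tasks/{upid}/log endpoint, not part of the codebase):

    struct MockTask {
        lines: Vec<String>,
        active: bool,
    }

    impl MockTask {
        // returns (active, total, lines numbered from `start`, at most `limit` of them)
        fn fetch(&self, start: u64, limit: u64) -> (bool, u64, Vec<(u64, String)>) {
            let total = self.lines.len() as u64;
            let chunk = self
                .lines
                .iter()
                .enumerate()
                .map(|(i, t)| (i as u64 + 1, t.clone()))
                .filter(|(n, _)| *n >= start)
                .take(limit as usize)
                .collect();
            (self.active, total, chunk)
        }
    }

    fn main() {
        let task = MockTask {
            lines: (1..=7).map(|i| format!("log line {}", i)).collect(),
            active: false,
        };

        let mut start: u64 = 1; // task log lines are numbered from 1
        let limit: u64 = 3;

        loop {
            let (active, total, data) = task.fetch(start, limit);
            let lines = data.len() as u64;

            for (n, t) in data {
                assert_eq!(n, start, "got wrong line number in response data");
                start += 1;
                println!("{}", t);
            }

            if start > total {
                if active {
                    // caught up with a still-running task: wait, then poll again
                    std::thread::sleep(std::time::Duration::from_millis(1000));
                } else {
                    break; // task finished and its log is fully printed
                }
            } else if lines != limit {
                panic!("got wrong number of lines from server ({} != {})", lines, limit);
            }
        }
    }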
@@ -1138,33 +1181,48 @@ fn prune(
 
     let group = tools::required_string_param(&param, "group")?;
     let group = BackupGroup::parse(group)?;
 
+    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
     let dry_run = param["dry-run"].as_bool().unwrap_or(false);
 
     param.as_object_mut().unwrap().remove("repository");
     param.as_object_mut().unwrap().remove("group");
     param.as_object_mut().unwrap().remove("dry-run");
+    param.as_object_mut().unwrap().remove("output-format");
 
     param["backup-type"] = group.backup_type().into();
     param["backup-id"] = group.backup_id().into();
 
     if dry_run {
-        let result = async_main(async move { client.get(&path, Some(param)).await })?;
+        let result = async_main(async { client.get(&path, Some(param)).await })?;
         let data = &result["data"];
 
-        for item in data.as_array().unwrap() {
-            let timestamp = item["backup-time"].as_i64().unwrap();
-            let timestamp = BackupDir::backup_time_to_string(Utc.timestamp(timestamp, 0));
-            let keep = item["keep"].as_bool().unwrap();
-            println!("{}/{}/{} {}",
-                     group.backup_type(),
-                     group.backup_id(),
-                     timestamp,
-                     if keep { "keep" } else { "remove" },
-            );
+        if output_format == "text" {
+            for item in data.as_array().unwrap() {
+                let timestamp = item["backup-time"].as_i64().unwrap();
+                let timestamp = BackupDir::backup_time_to_string(Utc.timestamp(timestamp, 0));
+                let keep = item["keep"].as_bool().unwrap();
+                println!("{}/{}/{} {}",
+                         group.backup_type(),
+                         group.backup_id(),
+                         timestamp,
+                         if keep { "keep" } else { "remove" },
+                );
+            }
+        } else {
+            format_and_print_result(&data, &output_format);
         }
     } else {
-        let _result = async_main(async move { client.post(&path, Some(param)).await })?;
+        let result = async_main(async { client.post(&path, Some(param)).await })?;
+
+        let data = &result["data"];
+
+        if output_format == "text" {
+            if let Some(upid) = data.as_str() {
+                println!("UPID {:?}", data);
+                display_task_log(client, upid)?;
+            }
+        } else {
+            format_and_print_result(&data, &output_format);
+        }
     }
 
     record_repository(&repo);
 
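On the client side, the new output-format parameter splits both the dry-run and the real prune into a human-readable text path and a machine-readable path through format_and_print_result; in the text case a real prune now streams the task log via display_task_log. Hypothetical invocations, assuming the client's usual command syntax and a group named host/myhost:

    # dry run, default text output: one "keep"/"remove" line per snapshot
    proxmox-backup-client prune host/myhost --keep-yearly 1 --dry-run
    # real run, raw API result instead of the streamed task log
    proxmox-backup-client prune host/myhost --keep-yearly 1 --output-format json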
@@ -2157,6 +2215,7 @@ We do not extract '.pxar' archives when writing to standard output.
                 .schema()),
             ("group", false, &StringSchema::new("Backup group.").schema()),
         ], [
+            ("output-format", true, &OUTPUT_FORMAT),
             ("repository", true, &REPO_URL_SCHEMA),
         ])
     )