cleanup: use serde(flatten) for SyncJobStatus, improve code reuse

Dietmar Maurer 2021-02-19 09:36:39 +01:00
parent e6122a657e
commit 70842b9ef2
2 changed files with 50 additions and 123 deletions
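Note on the serde attribute used below: `#[serde(flatten)]` inlines the fields of a nested struct into the surrounding JSON object during (de)serialization, so `SyncJobStatus` can be reduced to a `config` plus a `status` member while the API keeps returning the same flat, kebab-case keys as before. A minimal, self-contained sketch of the mechanism, using simplified stand-in structs rather than the actual proxmox-backup types (assumes the serde and serde_json crates):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    #[serde(rename_all = "kebab-case")]
    struct Config {
        id: String,
        store: String,
    }

    #[derive(Serialize, Deserialize)]
    #[serde(rename_all = "kebab-case")]
    struct ScheduleStatus {
        #[serde(skip_serializing_if = "Option::is_none")]
        last_run_state: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        next_run: Option<i64>,
    }

    #[derive(Serialize, Deserialize)]
    struct Status {
        // `flatten` merges the inner fields into this object, so the
        // serialized form has no nested "config"/"status" keys
        #[serde(flatten)]
        config: Config,
        #[serde(flatten)]
        status: ScheduleStatus,
    }

    fn main() {
        let job = Status {
            config: Config { id: "job1".into(), store: "store1".into() },
            status: ScheduleStatus { last_run_state: Some("OK".into()), next_run: None },
        };
        // prints: {"id":"job1","store":"store1","last-run-state":"OK"}
        println!("{}", serde_json::to_string(&job).unwrap());
    }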

View File

@@ -7,16 +7,35 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::{list_subdirs_api_method, sortable};
 
-use crate::api2::types::*;
-use crate::api2::pull::do_sync_job;
-use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
-use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
-use crate::server::UPID;
-use crate::server::jobstate::{Job, JobState};
-use crate::tools::systemd::time::{
-    parse_calendar_event, compute_next_event};
+use crate::{
+    api2::{
+        types::{
+            DATASTORE_SCHEMA,
+            JOB_ID_SCHEMA,
+            Authid,
+        },
+        pull::do_sync_job,
+        config::sync::{
+            check_sync_job_modify_access,
+            check_sync_job_read_access,
+        },
+    },
+    config::{
+        cached_user_info::CachedUserInfo,
+        sync::{
+            self,
+            SyncJobStatus,
+            SyncJobConfig,
+        },
+    },
+    server::{
+        jobstate::{
+            Job,
+            JobState,
+            compute_schedule_status,
+        },
+    },
+};
 
 #[api(
     input: {
@@ -30,7 +49,7 @@ use crate::tools::systemd::time::{
     returns: {
         description: "List configured jobs and their status.",
         type: Array,
-        items: { type: sync::SyncJobStatus },
+        items: { type: SyncJobStatus },
     },
     access: {
         description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
@@ -49,48 +68,29 @@ pub fn list_sync_jobs(
     let (config, digest) = sync::config()?;
 
-    let mut list: Vec<SyncJobStatus> = config
+    let job_config_iter = config
         .convert_to_typed_array("sync")?
         .into_iter()
-        .filter(|job: &SyncJobStatus| {
+        .filter(|job: &SyncJobConfig| {
             if let Some(store) = &store {
                 &job.store == store
             } else {
                 true
             }
         })
-        .filter(|job: &SyncJobStatus| {
-            let as_config: SyncJobConfig = job.into();
-            check_sync_job_read_access(&user_info, &auth_id, &as_config)
-        }).collect();
+        .filter(|job: &SyncJobConfig| {
+            check_sync_job_read_access(&user_info, &auth_id, &job)
+        });
 
-    for job in &mut list {
+    let mut list = Vec::new();
+
+    for job in job_config_iter {
         let last_state = JobState::load("syncjob", &job.id)
             .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
-        let (upid, endtime, state, starttime) = match last_state {
-            JobState::Created { time } => (None, None, None, time),
-            JobState::Started { upid } => {
-                let parsed_upid: UPID = upid.parse()?;
-                (Some(upid), None, None, parsed_upid.starttime)
-            },
-            JobState::Finished { upid, state } => {
-                let parsed_upid: UPID = upid.parse()?;
-                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
-            },
-        };
-        job.last_run_upid = upid;
-        job.last_run_state = state;
-        job.last_run_endtime = endtime;
+        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
 
-        let last = job.last_run_endtime.unwrap_or(starttime);
-
-        job.next_run = (|| -> Option<i64> {
-            let schedule = job.schedule.as_ref()?;
-            let event = parse_calendar_event(&schedule).ok()?;
-            // ignore errors
-            compute_next_event(&event, last, false).unwrap_or(None)
-        })();
+        list.push(SyncJobStatus { config: job, status });
     }
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
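The per-endpoint status computation that used to live in this loop (parsing the last task's UPID, deriving end time and result, and estimating the next run from the calendar event) is now delegated to the shared `compute_schedule_status()` helper imported from the jobstate module, which is what the "improve code reuse" part of the commit message refers to. A hypothetical reconstruction of that helper, pieced together from the code removed above; the signature and field names are inferred from this diff and the actual implementation may differ:

    // Hypothetical sketch only -- assumes the crate's own types as used above
    // (JobState, UPID, JobScheduleStatus, parse_calendar_event, compute_next_event).
    pub fn compute_schedule_status(
        job_state: &JobState,
        schedule: Option<&str>,
    ) -> Result<JobScheduleStatus, Error> {
        let (last_run_upid, last_run_endtime, last_run_state, starttime) = match job_state {
            JobState::Created { time } => (None, None, None, *time),
            JobState::Started { upid } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid.clone()), None, None, parsed_upid.starttime)
            },
            JobState::Finished { upid, state } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid.clone()), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
            },
        };

        // estimate the next run from the configured calendar event, ignoring parse errors
        let last = last_run_endtime.unwrap_or(starttime);
        let next_run = (|| -> Option<i64> {
            let event = parse_calendar_event(schedule?).ok()?;
            compute_next_event(&event, last, false).unwrap_or(None)
        })();

        Ok(JobScheduleStatus { next_run, last_run_state, last_run_upid, last_run_endtime })
    }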

View File

@@ -71,98 +71,25 @@ pub struct SyncJobConfig {
     pub schedule: Option<String>,
 }
 
-impl From<&SyncJobStatus> for SyncJobConfig {
-    fn from(job_status: &SyncJobStatus) -> Self {
-        Self {
-            id: job_status.id.clone(),
-            store: job_status.store.clone(),
-            owner: job_status.owner.clone(),
-            remote: job_status.remote.clone(),
-            remote_store: job_status.remote_store.clone(),
-            remove_vanished: job_status.remove_vanished,
-            comment: job_status.comment.clone(),
-            schedule: job_status.schedule.clone(),
-        }
-    }
-}
-
 // FIXME: generate duplicate schemas/structs from one listing?
 #[api(
     properties: {
-        id: {
-            schema: JOB_ID_SCHEMA,
+        config: {
+            type: SyncJobConfig,
         },
-        store: {
-            schema: DATASTORE_SCHEMA,
+        status: {
+            type: JobScheduleStatus,
         },
-        owner: {
-            type: Authid,
-            optional: true,
-        },
-        remote: {
-            schema: REMOTE_ID_SCHEMA,
-        },
-        "remote-store": {
-            schema: DATASTORE_SCHEMA,
-        },
-        "remove-vanished": {
-            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
-            optional: true,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        schedule: {
-            optional: true,
-            schema: SYNC_SCHEDULE_SCHEMA,
-        },
-        "next-run": {
-            description: "Estimated time of the next run (UNIX epoch).",
-            optional: true,
-            type: Integer,
-        },
-        "last-run-state": {
-            description: "Result of the last run.",
-            optional: true,
-            type: String,
-        },
-        "last-run-upid": {
-            description: "Task UPID of the last run.",
-            optional: true,
-            type: String,
-        },
-        "last-run-endtime": {
-            description: "Endtime of the last run.",
-            optional: true,
-            type: Integer,
-        },
-    }
+    },
 )]
 #[serde(rename_all="kebab-case")]
 #[derive(Serialize,Deserialize)]
 /// Status of Sync Job
 pub struct SyncJobStatus {
-    pub id: String,
-    pub store: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Authid>,
-    pub remote: String,
-    pub remote_store: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub remove_vanished: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub schedule: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub next_run: Option<i64>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_state: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_upid: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_endtime: Option<i64>,
+    #[serde(flatten)]
+    pub config: SyncJobConfig,
+    #[serde(flatten)]
+    pub status: JobScheduleStatus,
 }
 
 fn init() -> SectionConfig {