cleanup: use serde(flatten) for SyncJobStatus, improve code reuse

Dietmar Maurer 2021-02-19 09:36:39 +01:00
parent e6122a657e
commit 70842b9ef2
2 changed files with 50 additions and 123 deletions
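
The point of the cleanup, in short: the old SyncJobStatus repeated every
SyncJobConfig field plus the last-run/next-run fields, so the same schema was
maintained twice. With #[serde(flatten)], the nested config and status structs
still serialize into one flat JSON object, so the API wire format should stay
unchanged. A minimal sketch of that behavior, using simplified field sets that
are NOT the real proxmox-backup types:

    // Simplified stand-ins for the real structs (assumption: field sets
    // trimmed down to a few representative members).
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct SyncJobConfig {
        id: String,
        store: String,
    }

    #[derive(Serialize, Deserialize)]
    #[serde(rename_all = "kebab-case")]
    struct JobScheduleStatus {
        #[serde(skip_serializing_if = "Option::is_none")]
        next_run: Option<i64>,
        #[serde(skip_serializing_if = "Option::is_none")]
        last_run_state: Option<String>,
    }

    #[derive(Serialize, Deserialize)]
    struct SyncJobStatus {
        // flatten inlines the fields of both members into the parent object
        #[serde(flatten)]
        config: SyncJobConfig,
        #[serde(flatten)]
        status: JobScheduleStatus,
    }

    fn main() {
        let job = SyncJobStatus {
            config: SyncJobConfig { id: "job1".into(), store: "tank".into() },
            status: JobScheduleStatus { next_run: Some(1613694000), last_run_state: None },
        };
        // prints one flat object: {"id":"job1","store":"tank","next-run":1613694000}
        println!("{}", serde_json::to_string(&job).unwrap());
    }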

@@ -7,16 +7,35 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::{list_subdirs_api_method, sortable};
 
-use crate::api2::types::*;
-use crate::api2::pull::do_sync_job;
-use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
-
-use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
-use crate::server::UPID;
-use crate::server::jobstate::{Job, JobState};
-use crate::tools::systemd::time::{
-    parse_calendar_event, compute_next_event};
+use crate::{
+    api2::{
+        types::{
+            DATASTORE_SCHEMA,
+            JOB_ID_SCHEMA,
+            Authid,
+        },
+        pull::do_sync_job,
+        config::sync::{
+            check_sync_job_modify_access,
+            check_sync_job_read_access,
+        },
+    },
+    config::{
+        cached_user_info::CachedUserInfo,
+        sync::{
+            self,
+            SyncJobStatus,
+            SyncJobConfig,
+        },
+    },
+    server::{
+        jobstate::{
+            Job,
+            JobState,
+            compute_schedule_status,
+        },
+    },
+};
 
 #[api(
     input: {
@@ -30,7 +49,7 @@ use crate::tools::systemd::time::{
     returns: {
         description: "List configured jobs and their status.",
         type: Array,
-        items: { type: sync::SyncJobStatus },
+        items: { type: SyncJobStatus },
     },
     access: {
         description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
@@ -49,48 +68,29 @@ pub fn list_sync_jobs(
     let (config, digest) = sync::config()?;
 
-    let mut list: Vec<SyncJobStatus> = config
+    let job_config_iter = config
         .convert_to_typed_array("sync")?
         .into_iter()
-        .filter(|job: &SyncJobStatus| {
+        .filter(|job: &SyncJobConfig| {
             if let Some(store) = &store {
                 &job.store == store
             } else {
                 true
             }
         })
-        .filter(|job: &SyncJobStatus| {
-            let as_config: SyncJobConfig = job.into();
-            check_sync_job_read_access(&user_info, &auth_id, &as_config)
-        }).collect();
+        .filter(|job: &SyncJobConfig| {
+            check_sync_job_read_access(&user_info, &auth_id, &job)
+        });
+
+    let mut list = Vec::new();
 
-    for job in &mut list {
+    for job in job_config_iter {
         let last_state = JobState::load("syncjob", &job.id)
             .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
-        let (upid, endtime, state, starttime) = match last_state {
-            JobState::Created { time } => (None, None, None, time),
-            JobState::Started { upid } => {
-                let parsed_upid: UPID = upid.parse()?;
-                (Some(upid), None, None, parsed_upid.starttime)
-            },
-            JobState::Finished { upid, state } => {
-                let parsed_upid: UPID = upid.parse()?;
-                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
-            },
-        };
-
-        job.last_run_upid = upid;
-        job.last_run_state = state;
-        job.last_run_endtime = endtime;
 
-        let last = job.last_run_endtime.unwrap_or(starttime);
+        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
 
-        job.next_run = (|| -> Option<i64> {
-            let schedule = job.schedule.as_ref()?;
-            let event = parse_calendar_event(&schedule).ok()?;
-            // ignore errors
-            compute_next_event(&event, last, false).unwrap_or(None)
-        })();
+        list.push(SyncJobStatus { config: job, status });
     }
 
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
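
The per-job match on JobState and the hand-rolled next-run calculation are
replaced above by a single call to the shared compute_schedule_status helper. A
sketch of the helper's plausible shape, reconstructed from the inline logic it
replaces; the real implementation lives in the jobstate module, so treat the
import paths and details here as assumptions:

    // Reconstruction, not a quote from the commit. The types (JobState, UPID,
    // JobScheduleStatus) and helpers (parse_calendar_event, compute_next_event)
    // are the crate's own; the exact import paths below are assumed.
    use anyhow::Error;
    use crate::api2::types::JobScheduleStatus;
    use crate::server::UPID;
    use crate::server::jobstate::JobState;
    use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};

    pub fn compute_schedule_status(
        last_state: &JobState,
        schedule: Option<&str>,
    ) -> Result<JobScheduleStatus, Error> {
        // same three-way classification the removed inline code performed
        let (upid, endtime, state, starttime) = match last_state {
            JobState::Created { time } => (None, None, None, *time),
            JobState::Started { upid } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid.clone()), None, None, parsed_upid.starttime)
            }
            JobState::Finished { upid, state } => {
                let parsed_upid: UPID = upid.parse()?;
                (Some(upid.clone()), Some(state.endtime()), Some(state.to_string()),
                 parsed_upid.starttime)
            }
        };

        // the next run is computed from the schedule, starting at the last
        // endtime (or the start time if the job never finished); calendar
        // parsing and computation errors are deliberately ignored
        let last = endtime.unwrap_or(starttime);
        let next_run = (|| -> Option<i64> {
            let event = parse_calendar_event(schedule?).ok()?;
            compute_next_event(&event, last, false).unwrap_or(None)
        })();

        Ok(JobScheduleStatus {
            next_run,
            last_run_state: state,
            last_run_upid: upid,
            last_run_endtime: endtime,
        })
    }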

@@ -71,98 +71,25 @@ pub struct SyncJobConfig {
     pub schedule: Option<String>,
 }
 
-impl From<&SyncJobStatus> for SyncJobConfig {
-    fn from(job_status: &SyncJobStatus) -> Self {
-        Self {
-            id: job_status.id.clone(),
-            store: job_status.store.clone(),
-            owner: job_status.owner.clone(),
-            remote: job_status.remote.clone(),
-            remote_store: job_status.remote_store.clone(),
-            remove_vanished: job_status.remove_vanished,
-            comment: job_status.comment.clone(),
-            schedule: job_status.schedule.clone(),
-        }
-    }
-}
-
-// FIXME: generate duplicate schemas/structs from one listing?
 #[api(
     properties: {
-        id: {
-            schema: JOB_ID_SCHEMA,
+        config: {
+            type: SyncJobConfig,
         },
-        store: {
-            schema: DATASTORE_SCHEMA,
+        status: {
+            type: JobScheduleStatus,
         },
-        owner: {
-            type: Authid,
-            optional: true,
-        },
-        remote: {
-            schema: REMOTE_ID_SCHEMA,
-        },
-        "remote-store": {
-            schema: DATASTORE_SCHEMA,
-        },
-        "remove-vanished": {
-            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
-            optional: true,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-        schedule: {
-            optional: true,
-            schema: SYNC_SCHEDULE_SCHEMA,
-        },
-        "next-run": {
-            description: "Estimated time of the next run (UNIX epoch).",
-            optional: true,
-            type: Integer,
-        },
-        "last-run-state": {
-            description: "Result of the last run.",
-            optional: true,
-            type: String,
-        },
-        "last-run-upid": {
-            description: "Task UPID of the last run.",
-            optional: true,
-            type: String,
-        },
-        "last-run-endtime": {
-            description: "Endtime of the last run.",
-            optional: true,
-            type: Integer,
-        },
-    }
+    },
 )]
 #[serde(rename_all="kebab-case")]
 #[derive(Serialize,Deserialize)]
 /// Status of Sync Job
 pub struct SyncJobStatus {
-    pub id: String,
-    pub store: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Authid>,
-    pub remote: String,
-    pub remote_store: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub remove_vanished: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub schedule: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub next_run: Option<i64>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_state: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_upid: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub last_run_endtime: Option<i64>,
+    #[serde(flatten)]
+    pub config: SyncJobConfig,
+    #[serde(flatten)]
+    pub status: JobScheduleStatus,
 }
 
 fn init() -> SectionConfig {
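
A quick round-trip check on the new layout, reusing the simplified stand-in
types from the sketch near the top (a hypothetical test, not part of the
commit): a flat JSON object in the old wire format still deserializes into the
nested structs, since flatten applies on the way in as well.

    #[test]
    fn flat_json_round_trips_through_nested_structs() {
        // old-style flat object, as the hand-written SyncJobStatus produced it
        let json = r#"{"id":"job1","store":"tank","next-run":1613694000}"#;

        let job: SyncJobStatus = serde_json::from_str(json).unwrap();
        assert_eq!(job.config.id, "job1");
        assert_eq!(job.status.next_run, Some(1613694000));

        // serializing again yields the same flat shape
        assert_eq!(serde_json::to_string(&job).unwrap(), json);
    }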