moved tape_job.rs to pbs_config workspace
This commit is contained in:
		
							
								
								
									
										393
									
								
								pbs-api-types/src/jobs.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										393
									
								
								pbs-api-types/src/jobs.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,393 @@ | ||||
| use serde::{Deserialize, Serialize}; | ||||
|  | ||||
| use proxmox::const_regex; | ||||
|  | ||||
| use proxmox::api::{api, schema::*}; | ||||
|  | ||||
| use crate::{ | ||||
|     Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, | ||||
|     SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA, | ||||
| }; | ||||
|  | ||||
| const_regex!{ | ||||
|  | ||||
|     // Worker-task ID patterns: each PROXMOX_SAFE_ID_REGEX_STR!() component is | ||||
|     // one capture group, terminated by the ':' separating it from the job id. | ||||
|     /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' | ||||
|     pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); | ||||
|     /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID' | ||||
|     pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); | ||||
| } | ||||
|  | ||||
| /// Schema for worker-task UPIDs; only length-limited here, the format is opaque. | ||||
| pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.") | ||||
|     .max_length(256) | ||||
|     .schema(); | ||||
|  | ||||
| /// Schema for job identifiers: safe-id format, 3..=32 characters. | ||||
| pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") | ||||
|     .format(&PROXMOX_SAFE_ID_FORMAT) | ||||
|     .min_length(3) | ||||
|     .max_length(32) | ||||
|     .schema(); | ||||
|  | ||||
| /// Schedule schema for sync jobs (calendar-event syntax, systemd-style). | ||||
| pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run sync job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| /// Schedule schema for garbage-collection jobs (calendar-event syntax). | ||||
| pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run garbage collection job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| /// Schedule schema for prune jobs (calendar-event syntax). | ||||
| pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run prune job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| /// Schedule schema for verification jobs (calendar-event syntax). | ||||
| pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run verify job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| /// Flag schema for the sync-job `remove-vanished` option (defaults to true). | ||||
| // Fixed grammar in the user-visible description ("This remove" -> "This removes"). | ||||
| pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( | ||||
|     "Delete vanished backups. This removes the local copy if the remote backup was deleted.") | ||||
|     .default(true) | ||||
|     .schema(); | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         "next-run": { | ||||
|             description: "Estimated time of the next run (UNIX epoch).", | ||||
|             optional: true, | ||||
|             type: Integer, | ||||
|         }, | ||||
|         "last-run-state": { | ||||
|             description: "Result of the last run.", | ||||
|             optional: true, | ||||
|             type: String, | ||||
|         }, | ||||
|         "last-run-upid": { | ||||
|             description: "Task UPID of the last run.", | ||||
|             optional: true, | ||||
|             type: String, | ||||
|         }, | ||||
|         "last-run-endtime": { | ||||
|             description: "Endtime of the last run.", | ||||
|             optional: true, | ||||
|             type: Integer, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Default)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Job Scheduling Status | ||||
| // Common runtime state, flattened (serde) into the *JobStatus types below. | ||||
| pub struct JobScheduleStatus { | ||||
|     // estimated next run, UNIX epoch (omitted from output when None) | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub next_run: Option<i64>, | ||||
|     // result string of the last run | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_state: Option<String>, | ||||
|     // task UPID of the last run | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_upid: Option<String>, | ||||
|     // end time of the last run, UNIX epoch | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_endtime: Option<i64>, | ||||
| } | ||||
|  | ||||
| #[api()] | ||||
| #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] | ||||
| #[serde(rename_all = "lowercase")] | ||||
| /// When do we send notifications | ||||
| // Serialized lowercase on the wire: "never" / "always" / "error". | ||||
| pub enum Notify { | ||||
|     /// Never send notification | ||||
|     Never, | ||||
|     /// Send notifications for failed and successful jobs | ||||
|     Always, | ||||
|     /// Send notifications for failed jobs only | ||||
|     Error, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         gc: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|         verify: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|         sync: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Debug, Serialize, Deserialize)] | ||||
| /// Datastore notify settings | ||||
| // Per-job-type notification preference; also usable as a property string | ||||
| // via DATASTORE_NOTIFY_STRING_SCHEMA. | ||||
| pub struct DatastoreNotify { | ||||
|     /// Garbage collection settings | ||||
|     pub gc: Option<Notify>, | ||||
|     /// Verify job setting | ||||
|     pub verify: Option<Notify>, | ||||
|     /// Sync job setting | ||||
|     pub sync: Option<Notify>, | ||||
| } | ||||
|  | ||||
| /// Property-string schema wrapping the DatastoreNotify object schema. | ||||
| pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( | ||||
|     "Datastore notification setting") | ||||
|     .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA)) | ||||
|     .schema(); | ||||
|  | ||||
| /// Flag schema: skip re-verifying snapshots whose verification is still current. | ||||
| pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( | ||||
|     "Do not verify backups that are already verified if their verification is not outdated.") | ||||
|     .default(true) | ||||
|     .schema(); | ||||
|  | ||||
| /// Age threshold in days (>= 1) for considering a verification outdated. | ||||
| // Fixed grammar in the user-visible description ("after that" -> "after which"). | ||||
| pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( | ||||
|     "Days after which a verification becomes outdated") | ||||
|     .minimum(1) | ||||
|     .schema(); | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         store: { | ||||
|             schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "ignore-verified": { | ||||
|             optional: true, | ||||
|             schema: IGNORE_VERIFIED_BACKUPS_SCHEMA, | ||||
|         }, | ||||
|         "outdated-after": { | ||||
|             optional: true, | ||||
|             schema: VERIFICATION_OUTDATED_AFTER_SCHEMA, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: VERIFICATION_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Verification Job | ||||
| pub struct VerificationJobConfig { | ||||
|     /// unique ID to address this job | ||||
|     pub id: String, | ||||
|     /// the datastore ID this verification job affects | ||||
|     pub store: String, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// if not set to false, check the age of the last snapshot verification to filter | ||||
|     /// out recent ones, depending on 'outdated_after' configuration. | ||||
|     pub ignore_verified: Option<bool>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false. | ||||
|     pub outdated_after: Option<i64>, | ||||
|     // free-form, single-line comment (SINGLE_LINE_COMMENT_SCHEMA) | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// when to schedule this job in calendar event notation | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: VerificationJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Verification Job | ||||
| pub struct VerificationJobStatus { | ||||
|     // static job configuration, flattened into the same JSON object | ||||
|     #[serde(flatten)] | ||||
|     pub config: VerificationJobConfig, | ||||
|     // runtime scheduling state, flattened as well | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         store: { | ||||
|            schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         pool: { | ||||
|             schema: MEDIA_POOL_NAME_SCHEMA, | ||||
|         }, | ||||
|         drive: { | ||||
|             schema: DRIVE_NAME_SCHEMA, | ||||
|         }, | ||||
|         "eject-media": { | ||||
|             description: "Eject media upon job completion.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "export-media-set": { | ||||
|             description: "Export media set upon job completion.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "latest-only": { | ||||
|             description: "Backup latest snapshots only.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "notify-user": { | ||||
|             optional: true, | ||||
|             type: Userid, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Tape Backup Job Setup | ||||
| // Shared settings block; flattened into TapeBackupJobConfig. | ||||
| pub struct TapeBackupJobSetup { | ||||
|     // source datastore (DATASTORE_SCHEMA) | ||||
|     pub store: String, | ||||
|     // target media pool (MEDIA_POOL_NAME_SCHEMA) | ||||
|     pub pool: String, | ||||
|     // tape drive to use (DRIVE_NAME_SCHEMA) | ||||
|     pub drive: String, | ||||
|     // eject media upon job completion | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub eject_media: Option<bool>, | ||||
|     // export the media set upon job completion | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub export_media_set: Option<bool>, | ||||
|     // back up the latest snapshots only | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub latest_only: Option<bool>, | ||||
|     /// Send job email notification to this user | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub notify_user: Option<Userid>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         setup: { | ||||
|             type: TapeBackupJobSetup, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: SYNC_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Tape Backup Job | ||||
| pub struct TapeBackupJobConfig { | ||||
|     // unique job identifier (JOB_ID_SCHEMA) | ||||
|     pub id: String, | ||||
|     // store/pool/drive and media-handling flags, flattened into this object | ||||
|     #[serde(flatten)] | ||||
|     pub setup: TapeBackupJobSetup, | ||||
|     // free-form, single-line comment | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     // NOTE(review): reuses SYNC_SCHEDULE_SCHEMA, so the generated description | ||||
|     // reads "Run sync job ..." — consider a dedicated tape-backup schedule schema. | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: TapeBackupJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Tape Backup Job | ||||
| pub struct TapeBackupJobStatus { | ||||
|     // static job configuration, flattened into the same JSON object | ||||
|     #[serde(flatten)] | ||||
|     pub config: TapeBackupJobConfig, | ||||
|     // runtime scheduling state, flattened as well | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
|     /// Next tape used (best guess) | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub next_media_label: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         store: { | ||||
|            schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "owner": { | ||||
|             type: Authid, | ||||
|             optional: true, | ||||
|         }, | ||||
|         remote: { | ||||
|             schema: REMOTE_ID_SCHEMA, | ||||
|         }, | ||||
|         "remote-store": { | ||||
|             schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "remove-vanished": { | ||||
|             schema: REMOVE_VANISHED_BACKUPS_SCHEMA, | ||||
|             optional: true, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: SYNC_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Sync Job | ||||
| pub struct SyncJobConfig { | ||||
|     // unique job identifier (JOB_ID_SCHEMA) | ||||
|     pub id: String, | ||||
|     // local target datastore (DATASTORE_SCHEMA) | ||||
|     pub store: String, | ||||
|     // authid owning the synced backups | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub owner: Option<Authid>, | ||||
|     // source remote (REMOTE_ID_SCHEMA) | ||||
|     pub remote: String, | ||||
|     // datastore name on the remote side | ||||
|     pub remote_store: String, | ||||
|     // delete local copies of backups that vanished on the remote | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub remove_vanished: Option<bool>, | ||||
|     // free-form, single-line comment | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     // calendar-event schedule (SYNC_SCHEDULE_SCHEMA) | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: SyncJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Sync Job | ||||
| // (Removed the stray blank line between the api macro and the derives for | ||||
| // consistency with VerificationJobStatus / TapeBackupJobStatus above.) | ||||
| pub struct SyncJobStatus { | ||||
|     // static job configuration, flattened into the same JSON object | ||||
|     #[serde(flatten)] | ||||
|     pub config: SyncJobConfig, | ||||
|     // runtime scheduling state, flattened as well | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
| } | ||||
| @ -34,6 +34,9 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR { | ||||
|     ); | ||||
| } | ||||
|  | ||||
| mod jobs; | ||||
| pub use jobs::*; | ||||
|  | ||||
| mod key_derivation; | ||||
| pub use key_derivation::{Kdf, KeyInfo}; | ||||
|  | ||||
| @ -667,3 +670,33 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { | ||||
|         &TaskListItem::API_SCHEMA, | ||||
|     ).schema(), | ||||
| }; | ||||
|  | ||||
| #[api()] | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| #[serde(rename_all = "PascalCase")] | ||||
| /// Describes a package for which an update is available. | ||||
| // Fields serialize in PascalCase, e.g. `change_log_url` -> "ChangeLogUrl". | ||||
| pub struct APTUpdateInfo { | ||||
|     /// Package name | ||||
|     pub package: String, | ||||
|     /// Package title | ||||
|     pub title: String, | ||||
|     /// Package architecture | ||||
|     pub arch: String, | ||||
|     /// Human readable package description | ||||
|     pub description: String, | ||||
|     /// New version to be updated to | ||||
|     pub version: String, | ||||
|     /// Old version currently installed | ||||
|     pub old_version: String, | ||||
|     /// Package origin | ||||
|     pub origin: String, | ||||
|     /// Package priority in human-readable form | ||||
|     pub priority: String, | ||||
|     /// Package section | ||||
|     pub section: String, | ||||
|     /// URL under which the package's changelog can be retrieved | ||||
|     pub change_log_url: String, | ||||
|     /// Custom extra field for additional package information | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub extra_info: Option<String>, | ||||
| } | ||||
|  | ||||
| @ -4,6 +4,7 @@ pub mod key_config; | ||||
| pub mod media_pool; | ||||
| pub mod remote; | ||||
| pub mod tape_encryption_keys; | ||||
| pub mod tape_job; | ||||
|  | ||||
| use anyhow::{format_err, Error}; | ||||
|  | ||||
|  | ||||
							
								
								
									
										66
									
								
								pbs-config/src/tape_job.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										66
									
								
								pbs-config/src/tape_job.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,66 @@ | ||||
| use anyhow::{Error}; | ||||
| use lazy_static::lazy_static; | ||||
| use std::collections::HashMap; | ||||
|  | ||||
| use proxmox::api::{ | ||||
|     schema::{Schema, ApiType}, | ||||
|     section_config::{ | ||||
|         SectionConfig, | ||||
|         SectionConfigData, | ||||
|         SectionConfigPlugin, | ||||
|     } | ||||
| }; | ||||
|  | ||||
| use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA}; | ||||
|  | ||||
| use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; | ||||
|  | ||||
| lazy_static! { | ||||
|     /// Section-config parser/writer for tape-job.cfg, built once on first use. | ||||
|     pub static ref CONFIG: SectionConfig = init(); | ||||
| } | ||||
|  | ||||
| /// Build the section-config definition: one "backup" section type, keyed by "id". | ||||
| fn init() -> SectionConfig { | ||||
|     // TapeBackupJobConfig must expand to an AllOf schema (its setup is a | ||||
|     // flattened sub-object); anything else would be a programming error. | ||||
|     let job_schema = match TapeBackupJobConfig::API_SCHEMA { | ||||
|         Schema::AllOf(ref schema) => schema, | ||||
|         _ => unreachable!(), | ||||
|     }; | ||||
|  | ||||
|     let mut config = SectionConfig::new(&JOB_ID_SCHEMA); | ||||
|     config.register_plugin(SectionConfigPlugin::new( | ||||
|         "backup".to_string(), | ||||
|         Some("id".to_string()), | ||||
|         job_schema, | ||||
|     )); | ||||
|     config | ||||
| } | ||||
|  | ||||
| /// Path of the tape backup job configuration file. | ||||
| pub const TAPE_JOB_CFG_FILENAME: &str = "/etc/proxmox-backup/tape-job.cfg"; | ||||
| /// Lock file guarding modifications of tape-job.cfg. | ||||
| pub const TAPE_JOB_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.tape-job.lck"; | ||||
|  | ||||
| /// Get exclusive lock | ||||
| pub fn lock() -> Result<BackupLockGuard, Error> { | ||||
|     // (fixed stray space after the opening parenthesis — rustfmt style) | ||||
|     open_backup_lockfile(TAPE_JOB_CFG_LOCKFILE, None, true) | ||||
| } | ||||
|  | ||||
| /// Read and parse tape-job.cfg; returns the parsed data plus a SHA-256 digest | ||||
| /// of the raw content (callers compare it to detect concurrent edits). | ||||
| pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> { | ||||
|  | ||||
|     // A missing config file is treated exactly like an empty one. | ||||
|     let raw = match proxmox::tools::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)? { | ||||
|         Some(content) => content, | ||||
|         None => String::new(), | ||||
|     }; | ||||
|  | ||||
|     let digest = openssl::sha::sha256(raw.as_bytes()); | ||||
|     let data = CONFIG.parse(TAPE_JOB_CFG_FILENAME, &raw)?; | ||||
|     Ok((data, digest)) | ||||
| } | ||||
|  | ||||
| /// Serialize `config` and atomically replace tape-job.cfg with the result. | ||||
| pub fn save_config(config: &SectionConfigData) -> Result<(), Error> { | ||||
|     // `config` is already a reference; `&config` created a needless double | ||||
|     // reference (clippy::needless_borrow). | ||||
|     let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, config)?; | ||||
|     replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes()) | ||||
| } | ||||
|  | ||||
| // shell completion helper | ||||
|  | ||||
| /// List all tape job IDs | ||||
| pub fn complete_tape_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { | ||||
|     match config() { | ||||
|         Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(), | ||||
|         // best-effort: an unreadable config simply yields no completions; | ||||
|         // dropped the redundant `return` in expression position (clippy::needless_return) | ||||
|         Err(_) => Vec::new(), | ||||
|     } | ||||
| } | ||||
| @ -7,13 +7,10 @@ use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment}; | ||||
| use proxmox::api::router::SubdirMap; | ||||
| use proxmox::{list_subdirs_api_method, sortable}; | ||||
|  | ||||
| use pbs_api_types::{DATASTORE_SCHEMA, JOB_ID_SCHEMA, Authid, SyncJobConfig, SyncJobStatus}; | ||||
|  | ||||
| use crate::{ | ||||
|     api2::{ | ||||
|         types::{ | ||||
|             DATASTORE_SCHEMA, | ||||
|             JOB_ID_SCHEMA, | ||||
|             Authid, | ||||
|         }, | ||||
|         pull::do_sync_job, | ||||
|         config::sync::{ | ||||
|             check_sync_job_modify_access, | ||||
| @ -22,11 +19,7 @@ use crate::{ | ||||
|     }, | ||||
|     config::{ | ||||
|         cached_user_info::CachedUserInfo, | ||||
|         sync::{ | ||||
|             self, | ||||
|             SyncJobStatus, | ||||
|             SyncJobConfig, | ||||
|         }, | ||||
|         sync, | ||||
|     }, | ||||
|     server::{ | ||||
|         jobstate::{ | ||||
|  | ||||
| @ -7,11 +7,11 @@ use proxmox::api::router::SubdirMap; | ||||
| use proxmox::{list_subdirs_api_method, sortable}; | ||||
| use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment}; | ||||
|  | ||||
| use pbs_api_types::{VerificationJobConfig, VerificationJobStatus, JOB_ID_SCHEMA, Authid}; | ||||
|  | ||||
| use crate::{ | ||||
|     api2::types::{ | ||||
|         DATASTORE_SCHEMA, | ||||
|         JOB_ID_SCHEMA, | ||||
|         Authid, | ||||
|     }, | ||||
|     server::{ | ||||
|         do_verification_job, | ||||
| @ -22,16 +22,12 @@ use crate::{ | ||||
|         }, | ||||
|     }, | ||||
|     config::{ | ||||
|         verify, | ||||
|         acl::{ | ||||
|             PRIV_DATASTORE_AUDIT, | ||||
|             PRIV_DATASTORE_VERIFY, | ||||
|         }, | ||||
|         cached_user_info::CachedUserInfo, | ||||
|         verify::{ | ||||
|             self, | ||||
|             VerificationJobConfig, | ||||
|             VerificationJobStatus, | ||||
|         }, | ||||
|     }, | ||||
| }; | ||||
|  | ||||
| @ -48,7 +44,7 @@ use crate::{ | ||||
|     returns: { | ||||
|         description: "List configured jobs and their status (filtered by access)", | ||||
|         type: Array, | ||||
|         items: { type: verify::VerificationJobStatus }, | ||||
|         items: { type: VerificationJobStatus }, | ||||
|     }, | ||||
|     access: { | ||||
|         permission: &Permission::Anybody, | ||||
|  | ||||
| @ -8,11 +8,12 @@ use proxmox::http_err; | ||||
| use pbs_client::{HttpClient, HttpClientOptions}; | ||||
| use pbs_api_types::{ | ||||
|     REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA, Remote, RemoteConfig, RemoteConfigUpdater, | ||||
|     Authid, PROXMOX_CONFIG_DIGEST_SCHEMA, DataStoreListItem, | ||||
|     Authid, PROXMOX_CONFIG_DIGEST_SCHEMA, DataStoreListItem, SyncJobConfig, | ||||
| }; | ||||
|  | ||||
| use crate::config::cached_user_info::CachedUserInfo; | ||||
| use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY}; | ||||
| use crate::config::sync; | ||||
|  | ||||
| #[api( | ||||
|     input: { | ||||
| @ -247,8 +248,6 @@ pub fn update_remote( | ||||
| /// Remove a remote from the configuration file. | ||||
| pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> { | ||||
|  | ||||
|     use crate::config::sync::{self, SyncJobConfig}; | ||||
|  | ||||
|     let (sync_jobs, _) = sync::config()?; | ||||
|  | ||||
|     let job_list: Vec<SyncJobConfig>  = sync_jobs.convert_to_typed_array("sync")?; | ||||
|  | ||||
| @ -4,7 +4,11 @@ use ::serde::{Deserialize, Serialize}; | ||||
|  | ||||
| use proxmox::api::{api, Permission, Router, RpcEnvironment}; | ||||
|  | ||||
| use crate::api2::types::*; | ||||
| use pbs_api_types::{ | ||||
|     Authid, SyncJobConfig, | ||||
|     SINGLE_LINE_COMMENT_SCHEMA, JOB_ID_SCHEMA, REMOTE_ID_SCHEMA, DATASTORE_SCHEMA, | ||||
|     REMOVE_VANISHED_BACKUPS_SCHEMA, SYNC_SCHEDULE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, | ||||
| }; | ||||
|  | ||||
| use crate::config::acl::{ | ||||
|     PRIV_DATASTORE_AUDIT, | ||||
| @ -15,8 +19,7 @@ use crate::config::acl::{ | ||||
|     PRIV_REMOTE_READ, | ||||
| }; | ||||
|  | ||||
| use crate::config::cached_user_info::CachedUserInfo; | ||||
| use crate::config::sync::{self, SyncJobConfig}; | ||||
| use crate::config::{sync, cached_user_info::CachedUserInfo}; | ||||
| use pbs_config::open_backup_lockfile; | ||||
|  | ||||
| pub fn check_sync_job_read_access( | ||||
| @ -77,7 +80,7 @@ pub fn check_sync_job_modify_access( | ||||
|     returns: { | ||||
|         description: "List configured jobs.", | ||||
|         type: Array, | ||||
|         items: { type: sync::SyncJobConfig }, | ||||
|         items: { type: SyncJobConfig }, | ||||
|     }, | ||||
|     access: { | ||||
|         description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.", | ||||
| @ -154,7 +157,7 @@ pub fn create_sync_job( | ||||
|  | ||||
|     let _lock = open_backup_lockfile(sync::SYNC_CFG_LOCKFILE, None, true)?; | ||||
|  | ||||
|     let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?; | ||||
|     let sync_job: SyncJobConfig = serde_json::from_value(param)?; | ||||
|     if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) { | ||||
|         bail!("permission check failed"); | ||||
|     } | ||||
| @ -182,7 +185,7 @@ pub fn create_sync_job( | ||||
|             }, | ||||
|         }, | ||||
|     }, | ||||
|     returns: { type: sync::SyncJobConfig }, | ||||
|     returns: { type: SyncJobConfig }, | ||||
|     access: { | ||||
|         description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.", | ||||
|         permission: &Permission::Anybody, | ||||
| @ -306,7 +309,7 @@ pub fn update_sync_job( | ||||
|         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; | ||||
|     } | ||||
|  | ||||
|     let mut data: sync::SyncJobConfig = config.lookup("sync", &id)?; | ||||
|     let mut data: SyncJobConfig = config.lookup("sync", &id)?; | ||||
|  | ||||
|      if let Some(delete) = delete { | ||||
|         for delete_prop in delete { | ||||
|  | ||||
| @ -3,31 +3,27 @@ use serde_json::Value; | ||||
| use ::serde::{Deserialize, Serialize}; | ||||
|  | ||||
| use proxmox::api::{api, Router, RpcEnvironment, Permission}; | ||||
| use pbs_config::open_backup_lockfile; | ||||
|  | ||||
| use pbs_api_types::{ | ||||
|     Authid, | ||||
|     Userid, | ||||
|     TapeBackupJobConfig, | ||||
|     JOB_ID_SCHEMA, | ||||
|     DATASTORE_SCHEMA, | ||||
|     DRIVE_NAME_SCHEMA, | ||||
|     PROXMOX_CONFIG_DIGEST_SCHEMA, | ||||
|     SINGLE_LINE_COMMENT_SCHEMA, | ||||
|     MEDIA_POOL_NAME_SCHEMA, | ||||
|     SYNC_SCHEDULE_SCHEMA, | ||||
| }; | ||||
|  | ||||
| use crate::{ | ||||
|     api2::types::{ | ||||
|         Authid, | ||||
|         Userid, | ||||
|         JOB_ID_SCHEMA, | ||||
|         DATASTORE_SCHEMA, | ||||
|         DRIVE_NAME_SCHEMA, | ||||
|         PROXMOX_CONFIG_DIGEST_SCHEMA, | ||||
|         SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         MEDIA_POOL_NAME_SCHEMA, | ||||
|         SYNC_SCHEDULE_SCHEMA, | ||||
|     }, | ||||
|     config::{ | ||||
|         self, | ||||
|         cached_user_info::CachedUserInfo, | ||||
|         acl::{ | ||||
|             PRIV_TAPE_AUDIT, | ||||
|             PRIV_TAPE_MODIFY, | ||||
|         }, | ||||
|         tape_job::{ | ||||
|             TAPE_JOB_CFG_LOCKFILE, | ||||
|             TapeBackupJobConfig, | ||||
|         } | ||||
|     }, | ||||
| }; | ||||
|  | ||||
| @ -53,7 +49,7 @@ pub fn list_tape_backup_jobs( | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|     let user_info = CachedUserInfo::new()?; | ||||
|  | ||||
|     let (config, digest) = config::tape_job::config()?; | ||||
|     let (config, digest) = pbs_config::tape_job::config()?; | ||||
|  | ||||
|     let list = config.convert_to_typed_array::<TapeBackupJobConfig>("backup")?; | ||||
|  | ||||
| @ -89,9 +85,9 @@ pub fn create_tape_backup_job( | ||||
|     job: TapeBackupJobConfig, | ||||
|     _rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<(), Error> { | ||||
|     let _lock = open_backup_lockfile(TAPE_JOB_CFG_LOCKFILE, None, true)?; | ||||
|     let _lock = pbs_config::tape_job::lock()?; | ||||
|  | ||||
|     let (mut config, _digest) = config::tape_job::config()?; | ||||
|     let (mut config, _digest) = pbs_config::tape_job::config()?; | ||||
|  | ||||
|     if config.sections.get(&job.id).is_some() { | ||||
|         bail!("job '{}' already exists.", job.id); | ||||
| @ -99,7 +95,7 @@ pub fn create_tape_backup_job( | ||||
|  | ||||
|     config.set_data(&job.id, "backup", &job)?; | ||||
|  | ||||
|     config::tape_job::save_config(&config)?; | ||||
|     pbs_config::tape_job::save_config(&config)?; | ||||
|  | ||||
|     crate::server::jobstate::create_state_file("tape-backup-job", &job.id)?; | ||||
|  | ||||
| @ -125,7 +121,7 @@ pub fn read_tape_backup_job( | ||||
|     mut rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<TapeBackupJobConfig, Error> { | ||||
|  | ||||
|     let (config, digest) = config::tape_job::config()?; | ||||
|     let (config, digest) = pbs_config::tape_job::config()?; | ||||
|  | ||||
|     let job = config.lookup("backup", &id)?; | ||||
|  | ||||
| @ -232,9 +228,9 @@ pub fn update_tape_backup_job( | ||||
|     delete: Option<Vec<DeletableProperty>>, | ||||
|     digest: Option<String>, | ||||
| ) -> Result<(), Error> { | ||||
|     let _lock = open_backup_lockfile(TAPE_JOB_CFG_LOCKFILE, None, true)?; | ||||
|     let _lock = pbs_config::tape_job::lock()?; | ||||
|  | ||||
|     let (mut config, expected_digest) = config::tape_job::config()?; | ||||
|     let (mut config, expected_digest) = pbs_config::tape_job::config()?; | ||||
|  | ||||
|     let mut data: TapeBackupJobConfig = config.lookup("backup", &id)?; | ||||
|  | ||||
| @ -279,7 +275,7 @@ pub fn update_tape_backup_job( | ||||
|  | ||||
|     config.set_data(&id, "backup", &data)?; | ||||
|  | ||||
|     config::tape_job::save_config(&config)?; | ||||
|     pbs_config::tape_job::save_config(&config)?; | ||||
|  | ||||
|     if schedule_changed { | ||||
|         crate::server::jobstate::update_job_last_run_time("tape-backup-job", &id)?; | ||||
| @ -311,9 +307,9 @@ pub fn delete_tape_backup_job( | ||||
|     digest: Option<String>, | ||||
|     _rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<(), Error> { | ||||
|     let _lock = open_backup_lockfile(TAPE_JOB_CFG_LOCKFILE, None, true)?; | ||||
|     let _lock = pbs_config::tape_job::lock()?; | ||||
|  | ||||
|     let (mut config, expected_digest) = config::tape_job::config()?; | ||||
|     let (mut config, expected_digest) = pbs_config::tape_job::config()?; | ||||
|  | ||||
|     if let Some(ref digest) = digest { | ||||
|         let digest = proxmox::tools::hex_to_digest(digest)?; | ||||
| @ -327,7 +323,7 @@ pub fn delete_tape_backup_job( | ||||
|         Err(_) => { bail!("job '{}' does not exist.", id) }, | ||||
|     }; | ||||
|  | ||||
|     config::tape_job::save_config(&config)?; | ||||
|     pbs_config::tape_job::save_config(&config)?; | ||||
|  | ||||
|     crate::server::jobstate::remove_state_file("tape-backup-job", &id)?; | ||||
|  | ||||
|  | ||||
| @ -4,7 +4,12 @@ use ::serde::{Deserialize, Serialize}; | ||||
|  | ||||
| use proxmox::api::{api, Permission, Router, RpcEnvironment}; | ||||
|  | ||||
| use crate::api2::types::*; | ||||
| use pbs_api_types::{ | ||||
|     Authid, VerificationJobConfig, | ||||
|     SINGLE_LINE_COMMENT_SCHEMA, JOB_ID_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, | ||||
|     VERIFICATION_OUTDATED_AFTER_SCHEMA, VERIFICATION_SCHEDULE_SCHEMA, | ||||
|     DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, | ||||
| }; | ||||
|  | ||||
| use crate::config::acl::{ | ||||
|     PRIV_DATASTORE_AUDIT, | ||||
| @ -12,7 +17,7 @@ use crate::config::acl::{ | ||||
| }; | ||||
|  | ||||
| use crate::config::cached_user_info::CachedUserInfo; | ||||
| use crate::config::verify::{self, VerificationJobConfig}; | ||||
| use crate::config::verify; | ||||
| use pbs_config::open_backup_lockfile; | ||||
|  | ||||
| #[api( | ||||
| @ -22,7 +27,7 @@ use pbs_config::open_backup_lockfile; | ||||
|     returns: { | ||||
|         description: "List configured jobs.", | ||||
|         type: Array, | ||||
|         items: { type: verify::VerificationJobConfig }, | ||||
|         items: { type: VerificationJobConfig }, | ||||
|     }, | ||||
|     access: { | ||||
|         permission: &Permission::Anybody, | ||||
| @ -97,7 +102,7 @@ pub fn create_verification_job( | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|     let user_info = CachedUserInfo::new()?; | ||||
|  | ||||
|     let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?; | ||||
|     let verification_job: VerificationJobConfig = serde_json::from_value(param)?; | ||||
|  | ||||
|     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?; | ||||
|  | ||||
| @ -126,7 +131,7 @@ pub fn create_verification_job( | ||||
|             }, | ||||
|         }, | ||||
|     }, | ||||
|     returns: { type: verify::VerificationJobConfig }, | ||||
|     returns: { type: VerificationJobConfig }, | ||||
|     access: { | ||||
|         permission: &Permission::Anybody, | ||||
|         description: "Requires Datastore.Audit or Datastore.Verify on job's datastore.", | ||||
| @ -142,7 +147,7 @@ pub fn read_verification_job( | ||||
|  | ||||
|     let (config, digest) = verify::config()?; | ||||
|  | ||||
|     let verification_job: verify::VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|     let verification_job: VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|  | ||||
|     let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY; | ||||
|     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?; | ||||
| @ -239,7 +244,7 @@ pub fn update_verification_job( | ||||
|         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; | ||||
|     } | ||||
|  | ||||
|     let mut data: verify::VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|     let mut data: VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|  | ||||
|     // check existing store | ||||
|     user_info.check_privs(&auth_id, &["datastore", &data.store], PRIV_DATASTORE_VERIFY, true)?; | ||||
| @ -318,7 +323,7 @@ pub fn delete_verification_job( | ||||
|  | ||||
|     let (mut config, expected_digest) = verify::config()?; | ||||
|  | ||||
|     let job: verify::VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|     let job: VerificationJobConfig = config.lookup("verification", &id)?; | ||||
|     user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?; | ||||
|  | ||||
|     if let Some(ref digest) = digest { | ||||
|  | ||||
| @ -14,10 +14,10 @@ use pbs_api_types::{ | ||||
|  | ||||
| use crate::server::{WorkerTask, jobstate::Job, pull::pull_store}; | ||||
| use crate::backup::DataStore; | ||||
| use crate::api2::types::REMOVE_VANISHED_BACKUPS_SCHEMA; | ||||
|  | ||||
| use pbs_api_types::{SyncJobConfig, REMOVE_VANISHED_BACKUPS_SCHEMA}; | ||||
|  | ||||
| use crate::config::{ | ||||
|     sync::SyncJobConfig, | ||||
|     acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ}, | ||||
|     cached_user_info::CachedUserInfo, | ||||
| }; | ||||
|  | ||||
| @ -15,25 +15,23 @@ use proxmox::{ | ||||
|     }, | ||||
| }; | ||||
|  | ||||
| use pbs_api_types::{Authid, Userid}; | ||||
| use pbs_api_types::{ | ||||
|     Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig, | ||||
|     UPID_SCHEMA, JOB_ID_SCHEMA, | ||||
| }; | ||||
|  | ||||
| use pbs_datastore::{task_log, task_warn, StoreProgress}; | ||||
| use pbs_datastore::backup_info::{BackupDir, BackupInfo}; | ||||
| use pbs_datastore::task::TaskState; | ||||
|  | ||||
| use crate::{ | ||||
|     config::{ | ||||
|         self, | ||||
|         cached_user_info::CachedUserInfo, | ||||
|         acl::{ | ||||
|             PRIV_DATASTORE_READ, | ||||
|             PRIV_TAPE_AUDIT, | ||||
|             PRIV_TAPE_WRITE, | ||||
|         }, | ||||
|         tape_job::{ | ||||
|             TapeBackupJobConfig, | ||||
|             TapeBackupJobSetup, | ||||
|             TapeBackupJobStatus, | ||||
|         }, | ||||
|     }, | ||||
|     server::{ | ||||
|         lookup_user_email, | ||||
| @ -45,11 +43,6 @@ use crate::{ | ||||
|         }, | ||||
|     }, | ||||
|     backup::DataStore, | ||||
|     api2::types::{ | ||||
|         UPID_SCHEMA, | ||||
|         JOB_ID_SCHEMA, | ||||
|         MediaPoolConfig, | ||||
|     }, | ||||
|     server::WorkerTask, | ||||
|     tape::{ | ||||
|         TAPE_STATUS_DIR, | ||||
| @ -121,7 +114,7 @@ pub fn list_tape_backup_jobs( | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|     let user_info = CachedUserInfo::new()?; | ||||
|  | ||||
|     let (job_config, digest) = config::tape_job::config()?; | ||||
|     let (job_config, digest) = pbs_config::tape_job::config()?; | ||||
|     let (pool_config, _pool_digest) = pbs_config::media_pool::config()?; | ||||
|     let (drive_config, _digest) = pbs_config::drive::config()?; | ||||
|  | ||||
| @ -310,7 +303,7 @@ pub fn run_tape_backup_job( | ||||
| ) -> Result<String, Error> { | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     let (config, _digest) = config::tape_job::config()?; | ||||
|     let (config, _digest) = pbs_config::tape_job::config()?; | ||||
|     let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?; | ||||
|  | ||||
|     check_backup_permission( | ||||
|  | ||||
| @ -27,11 +27,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| { | ||||
| const_regex!{ | ||||
|     pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; //  fixme: define in common_regex ? | ||||
|  | ||||
|     /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID' | ||||
|     pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); | ||||
|     /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID' | ||||
|     pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):"); | ||||
|  | ||||
|     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$"); | ||||
|  | ||||
|     pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$"); | ||||
| @ -202,10 +197,6 @@ pub struct AclListItem { | ||||
|     pub roleid: String, | ||||
| } | ||||
|  | ||||
| pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.") | ||||
|     .max_length(256) | ||||
|     .schema(); | ||||
|  | ||||
| pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.") | ||||
|     .format(&DATASTORE_MAP_FORMAT) | ||||
|     .min_length(3) | ||||
| @ -225,50 +216,6 @@ pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new( | ||||
|     .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA)) | ||||
|     .schema(); | ||||
|  | ||||
| pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run sync job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run garbage collection job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run prune job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new( | ||||
|     "Run verify job at specified schedule.") | ||||
|     .format(&ApiStringFormat::VerifyFn(pbs_systemd::time::verify_calendar_event)) | ||||
|     .type_text("<calendar-event>") | ||||
|     .schema(); | ||||
|  | ||||
| pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.") | ||||
|     .format(&PROXMOX_SAFE_ID_FORMAT) | ||||
|     .min_length(3) | ||||
|     .max_length(32) | ||||
|     .schema(); | ||||
|  | ||||
| pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( | ||||
|     "Delete vanished backups. This remove the local copy if the remote backup was deleted.") | ||||
|     .default(true) | ||||
|     .schema(); | ||||
|  | ||||
| pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new( | ||||
|     "Do not verify backups that are already verified if their verification is not outdated.") | ||||
|     .default(true) | ||||
|     .schema(); | ||||
|  | ||||
| pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new( | ||||
|     "Days after that a verification becomes outdated") | ||||
|     .minimum(1) | ||||
|     .schema(); | ||||
|  | ||||
| pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).") | ||||
|     .format(&HOSTNAME_FORMAT) | ||||
| @ -690,120 +637,6 @@ pub enum RRDTimeFrameResolution { | ||||
|     Year = 60*10080, | ||||
| } | ||||
|  | ||||
| #[api()] | ||||
| #[derive(Debug, Clone, Serialize, Deserialize)] | ||||
| #[serde(rename_all = "PascalCase")] | ||||
| /// Describes a package for which an update is available. | ||||
| pub struct APTUpdateInfo { | ||||
|     /// Package name | ||||
|     pub package: String, | ||||
|     /// Package title | ||||
|     pub title: String, | ||||
|     /// Package architecture | ||||
|     pub arch: String, | ||||
|     /// Human readable package description | ||||
|     pub description: String, | ||||
|     /// New version to be updated to | ||||
|     pub version: String, | ||||
|     /// Old version currently installed | ||||
|     pub old_version: String, | ||||
|     /// Package origin | ||||
|     pub origin: String, | ||||
|     /// Package priority in human-readable form | ||||
|     pub priority: String, | ||||
|     /// Package section | ||||
|     pub section: String, | ||||
|     /// URL under which the package's changelog can be retrieved | ||||
|     pub change_log_url: String, | ||||
|     /// Custom extra field for additional package information | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub extra_info: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api()] | ||||
| #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] | ||||
| #[serde(rename_all = "lowercase")] | ||||
| /// When do we send notifications | ||||
| pub enum Notify { | ||||
|     /// Never send notification | ||||
|     Never, | ||||
|     /// Send notifications for failed and successful jobs | ||||
|     Always, | ||||
|     /// Send notifications for failed jobs only | ||||
|     Error, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         gc: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|         verify: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|         sync: { | ||||
|             type: Notify, | ||||
|             optional: true, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Debug, Serialize, Deserialize)] | ||||
| /// Datastore notify settings | ||||
| pub struct DatastoreNotify { | ||||
|     /// Garbage collection settings | ||||
|     pub gc: Option<Notify>, | ||||
|     /// Verify job setting | ||||
|     pub verify: Option<Notify>, | ||||
|     /// Sync job setting | ||||
|     pub sync: Option<Notify>, | ||||
| } | ||||
|  | ||||
| pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( | ||||
|     "Datastore notification setting") | ||||
|     .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA)) | ||||
|     .schema(); | ||||
|  | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         "next-run": { | ||||
|             description: "Estimated time of the next run (UNIX epoch).", | ||||
|             optional: true, | ||||
|             type: Integer, | ||||
|         }, | ||||
|         "last-run-state": { | ||||
|             description: "Result of the last run.", | ||||
|             optional: true, | ||||
|             type: String, | ||||
|         }, | ||||
|         "last-run-upid": { | ||||
|             description: "Task UPID of the last run.", | ||||
|             optional: true, | ||||
|             type: String, | ||||
|         }, | ||||
|         "last-run-endtime": { | ||||
|             description: "Endtime of the last run.", | ||||
|             optional: true, | ||||
|             type: Integer, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Default)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Job Scheduling Status | ||||
| pub struct JobScheduleStatus { | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub next_run: Option<i64>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_state: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_upid: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub last_run_endtime: Option<i64>, | ||||
| } | ||||
|  | ||||
| #[api] | ||||
| #[derive(Serialize, Deserialize, Default)] | ||||
| #[serde(rename_all = "kebab-case")] | ||||
|  | ||||
| @ -56,7 +56,7 @@ fn main() -> Result<(), Error> { | ||||
|             "apidata.js" => generate_api_tree(), | ||||
|             "datastore.cfg" => dump_section_config(&config::datastore::CONFIG), | ||||
|             "tape.cfg" => dump_section_config(&pbs_config::drive::CONFIG), | ||||
|             "tape-job.cfg" => dump_section_config(&config::tape_job::CONFIG), | ||||
|             "tape-job.cfg" => dump_section_config(&pbs_config::tape_job::CONFIG), | ||||
|             "user.cfg" => dump_section_config(&config::user::CONFIG), | ||||
|             "remote.cfg" => dump_section_config(&pbs_config::remote::CONFIG), | ||||
|             "sync.cfg" => dump_section_config(&config::sync::CONFIG), | ||||
|  | ||||
| @ -32,7 +32,7 @@ use pbs_buildcfg::configdir; | ||||
| use pbs_systemd::time::{compute_next_event, parse_calendar_event}; | ||||
| use pbs_tools::logrotate::LogRotate; | ||||
|  | ||||
| use proxmox_backup::api2::types::Authid; | ||||
| use pbs_api_types::{Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig}; | ||||
| use proxmox_backup::server; | ||||
| use proxmox_backup::auth_helpers::*; | ||||
| use proxmox_backup::tools::{ | ||||
| @ -520,12 +520,8 @@ async fn schedule_datastore_prune() { | ||||
|  | ||||
| async fn schedule_datastore_sync_jobs() { | ||||
|  | ||||
|     use proxmox_backup::config::sync::{ | ||||
|         self, | ||||
|         SyncJobConfig, | ||||
|     }; | ||||
|  | ||||
|     let config = match sync::config() { | ||||
|     let config = match proxmox_backup::config::sync::config() { | ||||
|         Err(err) => { | ||||
|             eprintln!("unable to read sync job config - {}", err); | ||||
|             return; | ||||
| @ -564,12 +560,7 @@ async fn schedule_datastore_sync_jobs() { | ||||
|  | ||||
| async fn schedule_datastore_verify_jobs() { | ||||
|  | ||||
|     use proxmox_backup::config::verify::{ | ||||
|         self, | ||||
|         VerificationJobConfig, | ||||
|     }; | ||||
|  | ||||
|     let config = match verify::config() { | ||||
|     let config = match proxmox_backup::config::verify::config() { | ||||
|         Err(err) => { | ||||
|             eprintln!("unable to read verification job config - {}", err); | ||||
|             return; | ||||
| @ -605,12 +596,7 @@ async fn schedule_datastore_verify_jobs() { | ||||
|  | ||||
| async fn schedule_tape_backup_jobs() { | ||||
|  | ||||
|     use proxmox_backup::config::tape_job::{ | ||||
|         self, | ||||
|         TapeBackupJobConfig, | ||||
|     }; | ||||
|  | ||||
|     let config = match tape_job::config() { | ||||
|     let config = match pbs_config::tape_job::config() { | ||||
|         Err(err) => { | ||||
|             eprintln!("unable to read tape job config - {}", err); | ||||
|             return; | ||||
|  | ||||
| @ -3,14 +3,12 @@ use serde_json::Value; | ||||
|  | ||||
| use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler}; | ||||
|  | ||||
| use pbs_api_types::JOB_ID_SCHEMA; | ||||
| use pbs_client::{connect_to_localhost, view_task_result}; | ||||
|  | ||||
| use proxmox_backup::{ | ||||
|     config, | ||||
|     api2::{ | ||||
|         self, | ||||
|         types::*, | ||||
|     }, | ||||
|     api2, | ||||
| }; | ||||
|  | ||||
| #[api( | ||||
| @ -112,17 +110,17 @@ pub fn backup_job_commands() -> CommandLineInterface { | ||||
|         .insert("show", | ||||
|                 CliCommand::new(&API_METHOD_SHOW_TAPE_BACKUP_JOB) | ||||
|                 .arg_param(&["id"]) | ||||
|                 .completion_cb("id", config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("id", pbs_config::tape_job::complete_tape_job_id) | ||||
|         ) | ||||
|         .insert("run", | ||||
|                 CliCommand::new(&API_METHOD_RUN_TAPE_BACKUP_JOB) | ||||
|                 .arg_param(&["id"]) | ||||
|                 .completion_cb("id", config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("id", pbs_config::tape_job::complete_tape_job_id) | ||||
|         ) | ||||
|         .insert("create", | ||||
|                 CliCommand::new(&api2::config::tape_backup_job::API_METHOD_CREATE_TAPE_BACKUP_JOB) | ||||
|                 .arg_param(&["id"]) | ||||
|                 .completion_cb("id", config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("id", pbs_config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("schedule", config::datastore::complete_calendar_event) | ||||
|                 .completion_cb("store", config::datastore::complete_datastore_name) | ||||
|                 .completion_cb("pool", pbs_config::media_pool::complete_pool_name) | ||||
| @ -131,7 +129,7 @@ pub fn backup_job_commands() -> CommandLineInterface { | ||||
|         .insert("update", | ||||
|                 CliCommand::new(&api2::config::tape_backup_job::API_METHOD_UPDATE_TAPE_BACKUP_JOB) | ||||
|                 .arg_param(&["id"]) | ||||
|                 .completion_cb("id", config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("id", pbs_config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("schedule", config::datastore::complete_calendar_event) | ||||
|                 .completion_cb("store", config::datastore::complete_datastore_name) | ||||
|                 .completion_cb("pool", pbs_config::media_pool::complete_pool_name) | ||||
| @ -140,7 +138,7 @@ pub fn backup_job_commands() -> CommandLineInterface { | ||||
|         .insert("remove", | ||||
|                 CliCommand::new(&api2::config::tape_backup_job::API_METHOD_DELETE_TAPE_BACKUP_JOB) | ||||
|                 .arg_param(&["id"]) | ||||
|                 .completion_cb("id", config::tape_job::complete_tape_job_id) | ||||
|                 .completion_cb("id", pbs_config::tape_job::complete_tape_job_id) | ||||
|         ); | ||||
|  | ||||
|     cmd_def.into() | ||||
|  | ||||
| @ -25,7 +25,6 @@ pub mod tfa; | ||||
| pub mod token_shadow; | ||||
| pub mod user; | ||||
| pub mod verify; | ||||
| pub mod tape_job; | ||||
|  | ||||
| /// Check configuration directory permissions | ||||
| /// | ||||
|  | ||||
| @ -1,10 +1,8 @@ | ||||
| use anyhow::{Error}; | ||||
| use lazy_static::lazy_static; | ||||
| use std::collections::HashMap; | ||||
| use serde::{Serialize, Deserialize}; | ||||
|  | ||||
| use proxmox::api::{ | ||||
|     api, | ||||
|     schema::*, | ||||
|     section_config::{ | ||||
|         SectionConfig, | ||||
| @ -13,82 +11,12 @@ use proxmox::api::{ | ||||
|     } | ||||
| }; | ||||
|  | ||||
| use crate::api2::types::*; | ||||
| use pbs_api_types::{JOB_ID_SCHEMA, SyncJobConfig}; | ||||
|  | ||||
| lazy_static! { | ||||
|     pub static ref CONFIG: SectionConfig = init(); | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         store: { | ||||
|            schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "owner": { | ||||
|             type: Authid, | ||||
|             optional: true, | ||||
|         }, | ||||
|         remote: { | ||||
|             schema: REMOTE_ID_SCHEMA, | ||||
|         }, | ||||
|         "remote-store": { | ||||
|             schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "remove-vanished": { | ||||
|             schema: REMOVE_VANISHED_BACKUPS_SCHEMA, | ||||
|             optional: true, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: SYNC_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Sync Job | ||||
| pub struct SyncJobConfig { | ||||
|     pub id: String, | ||||
|     pub store: String, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub owner: Option<Authid>, | ||||
|     pub remote: String, | ||||
|     pub remote_store: String, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub remove_vanished: Option<bool>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: SyncJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
|  | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Sync Job | ||||
| pub struct SyncJobStatus { | ||||
|     #[serde(flatten)] | ||||
|     pub config: SyncJobConfig, | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
| } | ||||
|  | ||||
| fn init() -> SectionConfig { | ||||
|     let obj_schema = match SyncJobConfig::API_SCHEMA { | ||||
|  | ||||
| @ -1,174 +0,0 @@ | ||||
| use anyhow::{Error}; | ||||
| use lazy_static::lazy_static; | ||||
| use std::collections::HashMap; | ||||
| use serde::{Serialize, Deserialize}; | ||||
|  | ||||
| use proxmox::api::{ | ||||
|     api, | ||||
|     schema::*, | ||||
|     section_config::{ | ||||
|         SectionConfig, | ||||
|         SectionConfigData, | ||||
|         SectionConfigPlugin, | ||||
|     } | ||||
| }; | ||||
|  | ||||
| use crate::api2::types::{ | ||||
|     Userid, | ||||
|     JOB_ID_SCHEMA, | ||||
|     DATASTORE_SCHEMA, | ||||
|     DRIVE_NAME_SCHEMA, | ||||
|     MEDIA_POOL_NAME_SCHEMA, | ||||
|     SINGLE_LINE_COMMENT_SCHEMA, | ||||
|     SYNC_SCHEDULE_SCHEMA, | ||||
|     JobScheduleStatus, | ||||
| }; | ||||
|  | ||||
| lazy_static! { | ||||
|     pub static ref CONFIG: SectionConfig = init(); | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         store: { | ||||
|            schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         pool: { | ||||
|             schema: MEDIA_POOL_NAME_SCHEMA, | ||||
|         }, | ||||
|         drive: { | ||||
|             schema: DRIVE_NAME_SCHEMA, | ||||
|         }, | ||||
|         "eject-media": { | ||||
|             description: "Eject media upon job completion.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "export-media-set": { | ||||
|             description: "Export media set upon job completion.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "latest-only": { | ||||
|             description: "Backup latest snapshots only.", | ||||
|             type: bool, | ||||
|             optional: true, | ||||
|         }, | ||||
|         "notify-user": { | ||||
|             optional: true, | ||||
|             type: Userid, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Tape Backup Job Setup | ||||
| pub struct TapeBackupJobSetup { | ||||
|     pub store: String, | ||||
|     pub pool: String, | ||||
|     pub drive: String, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub eject_media: Option<bool>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub export_media_set: Option<bool>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub latest_only: Option<bool>, | ||||
|     /// Send job email notification to this user | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub notify_user: Option<Userid>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         setup: { | ||||
|             type: TapeBackupJobSetup, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: SYNC_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize,Clone)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Tape Backup Job | ||||
| pub struct TapeBackupJobConfig { | ||||
|     pub id: String, | ||||
|     #[serde(flatten)] | ||||
|     pub setup: TapeBackupJobSetup, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub comment: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: TapeBackupJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Tape Backup Job | ||||
| pub struct TapeBackupJobStatus { | ||||
|     #[serde(flatten)] | ||||
|     pub config: TapeBackupJobConfig, | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
|     /// Next tape used (best guess) | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     pub next_media_label: Option<String>, | ||||
| } | ||||
|  | ||||
| fn init() -> SectionConfig { | ||||
|     let obj_schema = match TapeBackupJobConfig::API_SCHEMA { | ||||
|         Schema::AllOf(ref allof_schema) => allof_schema, | ||||
|         _ => unreachable!(), | ||||
|     }; | ||||
|  | ||||
|     let plugin = SectionConfigPlugin::new("backup".to_string(), Some(String::from("id")), obj_schema); | ||||
|     let mut config = SectionConfig::new(&JOB_ID_SCHEMA); | ||||
|     config.register_plugin(plugin); | ||||
|  | ||||
|     config | ||||
| } | ||||
|  | ||||
| pub const TAPE_JOB_CFG_FILENAME: &str = "/etc/proxmox-backup/tape-job.cfg"; | ||||
| pub const TAPE_JOB_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.tape-job.lck"; | ||||
|  | ||||
| pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> { | ||||
|  | ||||
|     let content = proxmox::tools::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)? | ||||
|         .unwrap_or_else(|| "".to_string()); | ||||
|  | ||||
|     let digest = openssl::sha::sha256(content.as_bytes()); | ||||
|     let data = CONFIG.parse(TAPE_JOB_CFG_FILENAME, &content)?; | ||||
|     Ok((data, digest)) | ||||
| } | ||||
|  | ||||
| pub fn save_config(config: &SectionConfigData) -> Result<(), Error> { | ||||
|     let raw = CONFIG.write(TAPE_JOB_CFG_FILENAME, &config)?; | ||||
|     pbs_config::replace_backup_config(TAPE_JOB_CFG_FILENAME, raw.as_bytes()) | ||||
| } | ||||
|  | ||||
| // shell completion helper | ||||
|  | ||||
| /// List all tape job IDs | ||||
| pub fn complete_tape_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { | ||||
|     match config() { | ||||
|         Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(), | ||||
|         Err(_) => return vec![], | ||||
|     } | ||||
| } | ||||
| @ -1,10 +1,8 @@ | ||||
| use anyhow::{Error}; | ||||
| use lazy_static::lazy_static; | ||||
| use std::collections::HashMap; | ||||
| use serde::{Serialize, Deserialize}; | ||||
|  | ||||
| use proxmox::api::{ | ||||
|     api, | ||||
|     schema::*, | ||||
|     section_config::{ | ||||
|         SectionConfig, | ||||
| @ -13,81 +11,12 @@ use proxmox::api::{ | ||||
|     } | ||||
| }; | ||||
|  | ||||
| use crate::api2::types::*; | ||||
| use pbs_api_types::{JOB_ID_SCHEMA, VerificationJobConfig}; | ||||
|  | ||||
| lazy_static! { | ||||
|     pub static ref CONFIG: SectionConfig = init(); | ||||
| } | ||||
|  | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         id: { | ||||
|             schema: JOB_ID_SCHEMA, | ||||
|         }, | ||||
|         store: { | ||||
|             schema: DATASTORE_SCHEMA, | ||||
|         }, | ||||
|         "ignore-verified": { | ||||
|             optional: true, | ||||
|             schema: IGNORE_VERIFIED_BACKUPS_SCHEMA, | ||||
|         }, | ||||
|         "outdated-after": { | ||||
|             optional: true, | ||||
|             schema: VERIFICATION_OUTDATED_AFTER_SCHEMA, | ||||
|         }, | ||||
|         comment: { | ||||
|             optional: true, | ||||
|             schema: SINGLE_LINE_COMMENT_SCHEMA, | ||||
|         }, | ||||
|         schedule: { | ||||
|             optional: true, | ||||
|             schema: VERIFICATION_SCHEDULE_SCHEMA, | ||||
|         }, | ||||
|     } | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Verification Job | ||||
| pub struct VerificationJobConfig { | ||||
|     /// unique ID to address this job | ||||
|     pub id: String, | ||||
|     /// the datastore ID this verification job affects | ||||
|     pub store: String, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// if not set to false, check the age of the last snapshot verification to filter | ||||
|     /// out recent ones, depending on 'outdated_after' configuration. | ||||
|     pub ignore_verified: Option<bool>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false. | ||||
|     pub outdated_after: Option<i64>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// single-line comment for this job | ||||
|     pub comment: Option<String>, | ||||
|     #[serde(skip_serializing_if="Option::is_none")] | ||||
|     /// when to schedule this job in calendar event notation | ||||
|     pub schedule: Option<String>, | ||||
| } | ||||
|  | ||||
| #[api( | ||||
|     properties: { | ||||
|         config: { | ||||
|             type: VerificationJobConfig, | ||||
|         }, | ||||
|         status: { | ||||
|             type: JobScheduleStatus, | ||||
|         }, | ||||
|     }, | ||||
| )] | ||||
| #[derive(Serialize,Deserialize)] | ||||
| #[serde(rename_all="kebab-case")] | ||||
| /// Status of Verification Job | ||||
| pub struct VerificationJobStatus { | ||||
|     /// the job configuration (fields merged into this object via serde flatten) | ||||
|     #[serde(flatten)] | ||||
|     pub config: VerificationJobConfig, | ||||
|     /// the schedule/run status (fields merged into this object via serde flatten) | ||||
|     #[serde(flatten)] | ||||
|     pub status: JobScheduleStatus, | ||||
| } | ||||
|  | ||||
| fn init() -> SectionConfig { | ||||
|     let obj_schema = match VerificationJobConfig::API_SCHEMA { | ||||
|         Schema::Object(ref obj_schema) => obj_schema, | ||||
|  | ||||
| @ -8,19 +8,14 @@ use proxmox::api::schema::{ApiType, parse_property_string}; | ||||
| use proxmox::try_block; | ||||
|  | ||||
| use pbs_tools::format::HumanByte; | ||||
| use pbs_api_types::{ | ||||
|     TapeBackupJobSetup, SyncJobConfig, VerificationJobConfig, | ||||
|     APTUpdateInfo, GarbageCollectionStatus, | ||||
|     Userid, Notify, DatastoreNotify, | ||||
| }; | ||||
|  | ||||
| use crate::{ | ||||
|     config::datastore::DataStoreConfig, | ||||
|     config::verify::VerificationJobConfig, | ||||
|     config::sync::SyncJobConfig, | ||||
|     config::tape_job::TapeBackupJobSetup, | ||||
|     api2::types::{ | ||||
|         APTUpdateInfo, | ||||
|         GarbageCollectionStatus, | ||||
|         Userid, | ||||
|         Notify, | ||||
|         DatastoreNotify, | ||||
|     }, | ||||
| }; | ||||
|  | ||||
| const GC_OK_TEMPLATE: &str = r###" | ||||
|  | ||||
| @ -1,12 +1,11 @@ | ||||
| use anyhow::{format_err, Error}; | ||||
|  | ||||
| use pbs_datastore::task_log; | ||||
| use pbs_api_types::{Authid, VerificationJobConfig}; | ||||
|  | ||||
| use crate::{ | ||||
|     server::WorkerTask, | ||||
|     api2::types::*, | ||||
|     server::jobstate::Job, | ||||
|     config::verify::VerificationJobConfig, | ||||
|     backup::{ | ||||
|         DataStore, | ||||
|         verify_filter, | ||||
|  | ||||
		Reference in New Issue
	
	Block a user