59af9ca98e
By requiring: Datastore.Backup permission for the target datastore; Remote.Read permission for the source remote/datastore; Datastore.Prune if vanished snapshots should be removed; Datastore.Modify if another user should own the freshly synced snapshots — reading a sync job entry only requires knowing about both the source remote and the target datastore. Note that this does not affect the Authid used to authenticate with the remote, which of course also needs permissions to access the source datastore. Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
218 lines
6.1 KiB
Rust
218 lines
6.1 KiB
Rust
use anyhow::{Error};
|
|
use lazy_static::lazy_static;
|
|
use std::collections::HashMap;
|
|
use serde::{Serialize, Deserialize};
|
|
|
|
use proxmox::api::{
|
|
api,
|
|
schema::*,
|
|
section_config::{
|
|
SectionConfig,
|
|
SectionConfigData,
|
|
SectionConfigPlugin,
|
|
}
|
|
};
|
|
|
|
use proxmox::tools::{fs::replace_file, fs::CreateOptions};
|
|
|
|
use crate::api2::types::*;
|
|
|
|
lazy_static! {
    // Section-config parser/writer for sync.cfg, built once on first access
    // via init() below and shared by config()/save_config().
    static ref CONFIG: SectionConfig = init();
}
|
|
|
|
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "owner": {
            type: Authid,
            optional: true,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
    }
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize,Clone)]
/// Sync Job
pub struct SyncJobConfig {
    // Unique job identifier (used as the section id in sync.cfg, see init()).
    pub id: String,
    // Target (local) datastore the snapshots are synced into.
    pub store: String,
    // Optional owner for the synced snapshots. All Option fields below are
    // omitted from the serialized config when None.
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Authid>,
    // Source remote id — presumably references an entry in the remote
    // configuration; confirm against REMOTE_ID_SCHEMA usage.
    pub remote: String,
    // Datastore on the source remote.
    pub remote_store: String,
    // Whether snapshots that vanished on the source should be removed locally.
    #[serde(skip_serializing_if="Option::is_none")]
    pub remove_vanished: Option<bool>,
    // Free-form single-line comment.
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    // Schedule for automatic runs (format defined by SYNC_SCHEDULE_SCHEMA).
    #[serde(skip_serializing_if="Option::is_none")]
    pub schedule: Option<String>,
}
|
|
|
|
impl From<&SyncJobStatus> for SyncJobConfig {
    /// Extracts the persistent configuration fields from a runtime status
    /// entry, discarding the status-only fields (next-run, last-run-*).
    fn from(job_status: &SyncJobStatus) -> Self {
        Self {
            id: job_status.id.clone(),
            store: job_status.store.clone(),
            owner: job_status.owner.clone(),
            remote: job_status.remote.clone(),
            remote_store: job_status.remote_store.clone(),
            // Option<bool> is Copy — no clone() needed (clippy: clone_on_copy)
            remove_vanished: job_status.remove_vanished,
            comment: job_status.comment.clone(),
            schedule: job_status.schedule.clone(),
        }
    }
}
|
|
|
|
// FIXME: generate duplicate schemas/structs from one listing?
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        owner: {
            type: Authid,
            optional: true,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
/// Status of Sync Job
pub struct SyncJobStatus {
    // --- configuration fields, mirroring SyncJobConfig (see FIXME above) ---
    pub id: String,
    pub store: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Authid>,
    pub remote: String,
    pub remote_store: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub remove_vanished: Option<bool>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub schedule: Option<String>,
    // --- runtime status fields (not part of the persisted config) ---
    // Estimated time of the next run (UNIX epoch).
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_run: Option<i64>,
    // Result of the last run.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_state: Option<String>,
    // Task UPID of the last run.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_upid: Option<String>,
    // Endtime of the last run (UNIX epoch).
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_endtime: Option<i64>,
}
|
|
|
|
fn init() -> SectionConfig {
|
|
let obj_schema = match SyncJobConfig::API_SCHEMA {
|
|
Schema::Object(ref obj_schema) => obj_schema,
|
|
_ => unreachable!(),
|
|
};
|
|
|
|
let plugin = SectionConfigPlugin::new("sync".to_string(), Some(String::from("id")), obj_schema);
|
|
let mut config = SectionConfig::new(&JOB_ID_SCHEMA);
|
|
config.register_plugin(plugin);
|
|
|
|
config
|
|
}
|
|
|
|
/// Location of the sync job configuration file.
pub const SYNC_CFG_FILENAME: &str = "/etc/proxmox-backup/sync.cfg";
/// Lock file for sync.cfg — not used in this module; presumably taken by
/// callers to serialize config modifications (confirm at call sites).
pub const SYNC_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.sync.lck";
|
|
|
|
pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
|
|
|
|
let content = proxmox::tools::fs::file_read_optional_string(SYNC_CFG_FILENAME)?;
|
|
let content = content.unwrap_or(String::from(""));
|
|
|
|
let digest = openssl::sha::sha256(content.as_bytes());
|
|
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;
|
|
Ok((data, digest))
|
|
}
|
|
|
|
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
|
let raw = CONFIG.write(SYNC_CFG_FILENAME, &config)?;
|
|
|
|
let backup_user = crate::backup::backup_user()?;
|
|
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
|
// set the correct owner/group/permissions while saving file
|
|
// owner(rw) = root, group(r)= backup
|
|
let options = CreateOptions::new()
|
|
.perm(mode)
|
|
.owner(nix::unistd::ROOT)
|
|
.group(backup_user.gid);
|
|
|
|
replace_file(SYNC_CFG_FILENAME, raw.as_bytes(), options)?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
// shell completion helper
|
|
pub fn complete_sync_job_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
|
match config() {
|
|
Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
|
|
Err(_) => return vec![],
|
|
}
|
|
}
|