move datastore config to pbs_config workspace
parent ba3d7e19fb
commit e7d4be9d85
@@ -3,14 +3,16 @@ use serde::{Deserialize, Serialize};
use proxmox::api::api;
use proxmox::api::schema::{
    ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema,
    StringSchema,
    StringSchema, Updater,
};

use proxmox::const_regex;

use crate::{
    PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
    Fingerprint, Authid,
    Fingerprint, Userid, Authid,
    GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA,
};

const_regex!{

@@ -31,6 +33,11 @@ const_regex!{
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
    .min_length(1)
    .max_length(4096)
    .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();

@@ -84,6 +91,130 @@ pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
    .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
    .schema();

pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();

#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
        },
        "keep-hourly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
        },
        "keep-daily": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_DAILY,
        },
        "keep-weekly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
        },
        "keep-monthly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
        },
        "keep-yearly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
    }
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    #[updater(skip)]
    pub name: String,
    #[updater(skip)]
    pub path: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub gc_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub prune_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_yearly: Option<u64>,
    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if="Option::is_none")]
    pub verify_new: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify_user: Option<Userid>,
    /// Send notification only for job errors
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify: Option<String>,
}

#[api(
    properties: {
        store: {
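// Editor's example (not part of the commit): a minimal sketch of how the
// relocated DataStoreConfig can now be deserialized by any crate that depends
// on pbs_api_types, the same way the disk-management handlers further below
// build one with serde_json. The datastore name and path are made-up values;
// missing optional fields simply deserialize to None.
fn example_build_datastore_config() -> Result<pbs_api_types::DataStoreConfig, anyhow::Error> {
    let datastore: pbs_api_types::DataStoreConfig = serde_json::from_value(serde_json::json!({
        "name": "store1",                    // hypothetical datastore name
        "path": "/mnt/datastore/store1",     // hypothetical mount point
        "gc-schedule": "daily",              // kebab-case keys, per the serde rename above
    }))?;
    Ok(datastore)
}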
@@ -5,8 +5,7 @@ use anyhow::bail;
use proxmox::api::api;
use proxmox::api::schema::{
    ApiStringFormat, ApiType, ArraySchema, IntegerSchema, ReturnType, Schema,
    StringSchema,
    ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox::const_regex;
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};

@@ -246,34 +245,6 @@ pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
    .format(&FINGERPRINT_SHA256_FORMAT)
    .schema();

pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();

pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

@@ -0,0 +1,100 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::collections::HashMap;

use proxmox::api::{
    schema::{ApiType, Schema},
    section_config::{
        SectionConfig,
        SectionConfigData,
        SectionConfigPlugin,
    }
};

use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}

fn init() -> SectionConfig {
    let obj_schema = match DataStoreConfig::API_SCHEMA {
        Schema::Object(ref obj_schema) => obj_schema,
        _ => unreachable!(),
    };

    let plugin = SectionConfigPlugin::new("datastore".to_string(), Some(String::from("name")), obj_schema);
    let mut config = SectionConfig::new(&DATASTORE_SCHEMA);
    config.register_plugin(plugin);

    config
}

pub const DATASTORE_CFG_FILENAME: &str = "/etc/proxmox-backup/datastore.cfg";
pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";

/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
    open_backup_lockfile(DATASTORE_CFG_LOCKFILE, None, true)
}

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
    let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
        .unwrap_or_else(|| "".to_string());

    let digest = openssl::sha::sha256(content.as_bytes());
    let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
    Ok((data, digest))
}

pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
    let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
    replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
}

// shell completion helper
pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    match config() {
        Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
        Err(_) => return vec![],
    }
}

pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    let mut list = Vec::new();

    list.push(String::from("/"));
    list.push(String::from("/datastore"));
    list.push(String::from("/datastore/"));

    if let Ok((data, _digest)) = config() {
        for id in data.sections.keys() {
            list.push(format!("/datastore/{}", id));
        }
    }

    list.push(String::from("/remote"));
    list.push(String::from("/remote/"));

    list.push(String::from("/tape"));
    list.push(String::from("/tape/"));
    list.push(String::from("/tape/drive"));
    list.push(String::from("/tape/drive/"));
    list.push(String::from("/tape/changer"));
    list.push(String::from("/tape/changer/"));
    list.push(String::from("/tape/pool"));
    list.push(String::from("/tape/pool/"));
    list.push(String::from("/tape/job"));
    list.push(String::from("/tape/job/"));

    list
}

pub fn complete_calendar_event(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    // just give some hints about possible values
    ["minutely", "hourly", "daily", "mon..fri", "0:0"]
        .iter().map(|s| String::from(*s)).collect()
}
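// Editor's example (not part of the commit): a minimal sketch of the
// read-modify-write cycle against the relocated pbs_config::datastore module,
// mirroring what the API handlers below do. "store1" is a made-up name.
fn example_update_datastore_comment() -> Result<(), anyhow::Error> {
    // take the exclusive lock before modifying the section config
    let _lock = pbs_config::datastore::lock_config()?;

    let (mut config, _digest) = pbs_config::datastore::config()?;

    // look up one section as a typed DataStoreConfig, change it, write it back
    let mut store: pbs_api_types::DataStoreConfig = config.lookup("datastore", "store1")?;
    store.comment = Some(String::from("updated via pbs_config::datastore"));
    config.set_data("store1", "datastore", &store)?;

    pbs_config::datastore::save_config(&config)
}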
@@ -1,6 +1,7 @@
pub mod acl;
mod cached_user_info;
pub use cached_user_info::CachedUserInfo;
pub mod datastore;
pub mod domains;
pub mod drive;
pub mod key_config;

@@ -50,6 +50,7 @@ use pbs_datastore::prune::{compute_prune_info, PruneOptions};
use pbs_tools::blocking::WrappedReaderStream;
use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;

use crate::api2::types::{DataStoreStatus, RRDMode, RRDTimeFrameResolution};
use crate::api2::node::rrd::create_value_from_rrd;

@@ -57,8 +58,6 @@ use crate::backup::{
    check_backup_owner, verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
    DataStore, LocalChunkReader,
};
use crate::config::datastore;
use pbs_config::CachedUserInfo;

use crate::server::{jobstate::Job, WorkerTask};

@@ -1050,7 +1049,7 @@ pub fn get_datastore_list(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
    let (config, _digest) = datastore::config()?;
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

@@ -15,6 +15,7 @@ use pbs_api_types::{
    Authid, DatastoreNotify,
    DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
    DataStoreConfig, DataStoreConfigUpdater,
};

use crate::api2::config::sync::delete_sync_job;

@@ -25,7 +26,7 @@ use crate::api2::admin::{
    verify::list_verification_jobs,
};
use pbs_config::CachedUserInfo;
use crate::config::datastore::{self, DataStoreConfig, DataStoreConfigUpdater};

use crate::server::{jobstate, WorkerTask};

#[api(

@@ -47,7 +48,7 @@ pub fn list_datastores(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {
    let (config, digest) = datastore::config()?;
    let (config, digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

@@ -76,7 +77,7 @@ pub(crate) fn do_create_datastore(
    config.set_data(&datastore.name, "datastore", &datastore)?;

    datastore::save_config(&config)?;
    pbs_config::datastore::save_config(&config)?;

    jobstate::create_state_file("prune", &datastore.name)?;
    jobstate::create_state_file("garbage_collection", &datastore.name)?;

@@ -104,9 +105,9 @@ pub fn create_datastore(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let lock = datastore::lock_config()?;
    let lock = pbs_config::datastore::lock_config()?;

    let (section_config, _digest) = datastore::config()?;
    let (section_config, _digest) = pbs_config::datastore::config()?;

    if section_config.sections.get(&config.name).is_some() {
        bail!("datastore '{}' already exists.", config.name);

@@ -141,7 +142,7 @@ pub fn read_datastore(
    name: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreConfig, Error> {
    let (config, digest) = datastore::config()?;
    let (config, digest) = pbs_config::datastore::config()?;

    let store_config = config.lookup("datastore", &name)?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

@@ -218,10 +219,10 @@ pub fn update_datastore(
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = datastore::lock_config()?;
    let _lock = pbs_config::datastore::lock_config()?;

    // pass/compare digest
    let (mut config, expected_digest) = datastore::config()?;
    let (mut config, expected_digest) = pbs_config::datastore::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;

@@ -292,7 +293,7 @@ pub fn update_datastore(
    config.set_data(&name, "datastore", &data)?;

    datastore::save_config(&config)?;
    pbs_config::datastore::save_config(&config)?;

    // we want to reset the statefiles, to avoid an immediate action in some cases
    // (e.g. going from monthly to weekly in the second week of the month)

@@ -338,9 +339,9 @@ pub async fn delete_datastore(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let _lock = datastore::lock_config()?;
    let _lock = pbs_config::datastore::lock_config()?;

    let (mut config, expected_digest) = datastore::config()?;
    let (mut config, expected_digest) = pbs_config::datastore::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;

@@ -366,7 +367,7 @@ pub async fn delete_datastore(
        }
    }

    datastore::save_config(&config)?;
    pbs_config::datastore::save_config(&config)?;

    // ignore errors
    let _ = jobstate::remove_state_file("prune", &name);
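// Editor's example (not part of the commit): consumers that need every
// datastore entry as a typed value, rather than a single one, convert the
// whole section config, the same way the disk, proxy and report code below
// does. The helper name is illustrative only.
fn example_list_datastore_paths() -> Result<Vec<String>, anyhow::Error> {
    let (config, _digest) = pbs_config::datastore::config()?;
    let datastores: Vec<pbs_api_types::DataStoreConfig> =
        config.convert_to_typed_array("datastore")?;
    Ok(datastores.into_iter().map(|ds| ds.path).collect())
}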
@@ -7,8 +7,8 @@ use proxmox::api::section_config::SectionConfigData;
use proxmox::api::router::Router;

use pbs_api_types::{
    Authid, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, UPID_SCHEMA,
    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    Authid, DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
    DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
};

use crate::tools::disks::{

@@ -19,9 +19,6 @@ use crate::tools::systemd::{self, types::*};
use crate::server::WorkerTask;

use crate::config::datastore::{self, DataStoreConfig};
use pbs_config::open_backup_lockfile;

#[api(
    properties: {
        "filesystem": {

@@ -183,11 +180,11 @@ pub fn create_datastore_disk(
    pbs_systemd::start_unit(&mount_unit_name)?;

    if add_datastore {
        let lock = open_backup_lockfile(datastore::DATASTORE_CFG_LOCKFILE, None, true)?;
        let lock = pbs_config::datastore::lock_config()?;
        let datastore: DataStoreConfig =
            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;

        let (config, _digest) = datastore::config()?;
        let (config, _digest) = pbs_config::datastore::config()?;

        if config.sections.get(&datastore.name).is_some() {
            bail!("datastore '{}' already exists.", datastore.name);

@@ -223,7 +220,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
    let path = format!("/mnt/datastore/{}", name);
    // path of datastore cannot be changed
    let (config, _) = crate::config::datastore::config()?;
    let (config, _) = pbs_config::datastore::config()?;
    let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
    let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
        .find(|ds| ds.path == path);
@@ -8,7 +8,7 @@ use proxmox::api::{
use proxmox::api::router::Router;

use pbs_api_types::{
    Authid, ZpoolListItem, ZfsRaidLevel, ZfsCompressionType,
    Authid, ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
    NODE_SCHEMA, ZPOOL_NAME_SCHEMA, DATASTORE_SCHEMA, DISK_ARRAY_SCHEMA,
    DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,

@@ -18,7 +18,6 @@ use crate::tools::disks::{
    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
    DiskUsageType,
};
use crate::config::datastore::{self, DataStoreConfig};

use crate::server::WorkerTask;

@@ -285,11 +284,11 @@ pub fn create_zpool(
    }

    if add_datastore {
        let lock = datastore::lock_config()?;
        let lock = pbs_config::datastore::lock_config()?;
        let datastore: DataStoreConfig =
            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;

        let (config, _digest) = datastore::config()?;
        let (config, _digest) = pbs_config::datastore::config()?;

        if config.sections.get(&datastore.name).is_some() {
            bail!("datastore '{}' already exists.", datastore.name);

@@ -20,7 +20,6 @@ use pbs_api_types::{
};

use crate::backup::DataStore;
use crate::config::datastore;
use crate::tools::statistics::{linear_regression};
use pbs_config::CachedUserInfo;

@@ -83,7 +82,7 @@ pub fn datastore_status(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let (config, _digest) = datastore::config()?;
    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

@@ -11,8 +11,7 @@ use lazy_static::lazy_static;
use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};

use pbs_api_types::upid::UPID;
use pbs_api_types::{Authid, GarbageCollectionStatus};
use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
use pbs_datastore::{task_log, task_warn};
use pbs_datastore::DataBlob;
use pbs_datastore::backup_info::{BackupGroup, BackupDir};

@@ -29,7 +28,6 @@ use pbs_datastore::task::TaskState;
use pbs_tools::format::HumanByte;
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};

use crate::config::datastore::{self, DataStoreConfig};
use crate::tools;
use pbs_config::{open_backup_lockfile, BackupLockGuard};

@@ -67,8 +65,8 @@ impl DataStore {
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {
        let (config, _digest) = datastore::config()?;
        let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
        let (config, _digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        let mut map = DATASTORE_MAP.lock().unwrap();

@@ -92,7 +90,7 @@ impl DataStore {
    /// removes all datastores that are not configured anymore
    pub fn remove_unused_datastores() -> Result<(), Error>{
        let (config, _digest) = datastore::config()?;
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config

@@ -27,7 +27,7 @@ use proxmox::{
use pbs_api_types::PRIVILEGES;

use proxmox_backup::{api2, config};
use proxmox_backup::api2;

fn get_args() -> (String, Vec<String>) {

@@ -50,7 +50,7 @@ fn main() -> Result<(), Error> {
    for arg in args.iter() {
        let text = match arg.as_ref() {
            "apidata.js" => generate_api_tree(),
            "datastore.cfg" => dump_section_config(&config::datastore::CONFIG),
            "datastore.cfg" => dump_section_config(&pbs_config::datastore::CONFIG),
            "tape.cfg" => dump_section_config(&pbs_config::drive::CONFIG),
            "tape-job.cfg" => dump_section_config(&pbs_config::tape_job::CONFIG),
            "user.cfg" => dump_section_config(&pbs_config::user::CONFIG),

@@ -89,12 +89,12 @@ fn garbage_collection_commands() -> CommandLineInterface {
        .insert("status",
            CliCommand::new(&API_METHOD_GARBAGE_COLLECTION_STATUS)
                .arg_param(&["store"])
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
        )
        .insert("start",
            CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION)
                .arg_param(&["store"])
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
        );

    cmd_def.into()

@@ -379,7 +379,7 @@ fn main() {
            "pull",
            CliCommand::new(&API_METHOD_PULL_DATASTORE)
                .arg_param(&["remote", "remote-store", "local-store"])
                .completion_cb("local-store", config::datastore::complete_datastore_name)
                .completion_cb("local-store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("remote", pbs_config::remote::complete_remote_name)
                .completion_cb("remote-store", complete_remote_datastore_name)
        )

@@ -387,7 +387,7 @@ fn main() {
            "verify",
            CliCommand::new(&API_METHOD_VERIFY)
                .arg_param(&["store"])
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
        )
        .insert("report",
            CliCommand::new(&API_METHOD_REPORT)
@@ -32,7 +32,9 @@ use pbs_buildcfg::configdir;
use pbs_systemd::time::{compute_next_event, parse_calendar_event};
use pbs_tools::logrotate::LogRotate;

use pbs_api_types::{Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig};
use pbs_api_types::{Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig, DataStoreConfig};
use pbs_datastore::prune::PruneOptions;

use proxmox_backup::server;
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{

@@ -45,6 +47,7 @@ use proxmox_backup::tools::{
    },
};

use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_verification_job;

@@ -374,14 +377,7 @@ async fn schedule_tasks() -> Result<(), Error> {
async fn schedule_datastore_garbage_collection() {
    use proxmox_backup::config::{
        datastore::{
            self,
            DataStoreConfig,
        },
    };

    let config = match datastore::config() {
    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;

@@ -459,15 +455,7 @@ async fn schedule_datastore_garbage_collection() {
async fn schedule_datastore_prune() {
    use pbs_datastore::prune::PruneOptions;
    use proxmox_backup::{
        config::datastore::{
            self,
            DataStoreConfig,
        },
    };

    let config = match datastore::config() {
    let config = match pbs_config::datastore::config() {
        Err(err) => {
            eprintln!("unable to read datastore config - {}", err);
            return;

@@ -765,8 +753,6 @@ fn rrd_update_derive(name: &str, value: f64, save: bool) {
async fn generate_host_stats(save: bool) {
    use proxmox::sys::linux::procfs::{
        read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
    use proxmox_backup::config::datastore;

    pbs_runtime::block_in_place(move || {

@@ -823,9 +809,9 @@ async fn generate_host_stats(save: bool) {
        gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

        match datastore::config() {
        match pbs_config::datastore::config() {
            Ok((config, _)) => {
                let datastore_list: Vec<datastore::DataStoreConfig> =
                let datastore_list: Vec<DataStoreConfig> =
                    config.convert_to_typed_array("datastore").unwrap_or_default();

                for config in datastore_list {

@@ -23,6 +23,7 @@ use pbs_tools::format::{
use pbs_config::drive::complete_drive_name;
use pbs_config::media_pool::complete_pool_name;
use pbs_config::datastore::complete_datastore_name;

use proxmox_backup::{
    api2::{

@@ -38,9 +39,6 @@ use proxmox_backup::{
            TAPE_RESTORE_SNAPSHOT_SCHEMA,
        },
    },
    config::{
        datastore::complete_datastore_name,
    },
    tape::{
        BlockReadError,
        drive::{

@@ -3,7 +3,6 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2;

#[api(

@@ -61,7 +60,7 @@ pub fn acl_commands() -> CommandLineInterface {
            CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
                .arg_param(&["path", "role"])
                .completion_cb("auth-id", pbs_config::user::complete_authid)
                .completion_cb("path", config::datastore::complete_acl_path)
                .completion_cb("path", pbs_config::datastore::complete_acl_path)
        );

@@ -4,10 +4,9 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use pbs_client::{connect_to_localhost, view_task_result};
use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use proxmox_backup::config::datastore::DataStoreConfig;
use proxmox_backup::api2;

#[api(
    input: {

@@ -106,7 +105,7 @@ pub fn datastore_commands() -> CommandLineInterface {
        .insert("show",
            CliCommand::new(&API_METHOD_SHOW_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
                .completion_cb("name", pbs_config::datastore::complete_datastore_name)
        )
        .insert("create",
            CliCommand::new(&API_METHOD_CREATE_DATASTORE)

@@ -115,14 +114,14 @@ pub fn datastore_commands() -> CommandLineInterface {
        .insert("update",
            CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
                .completion_cb("gc-schedule", config::datastore::complete_calendar_event)
                .completion_cb("prune-schedule", config::datastore::complete_calendar_event)
                .completion_cb("name", pbs_config::datastore::complete_datastore_name)
                .completion_cb("gc-schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("prune-schedule", pbs_config::datastore::complete_calendar_event)
        )
        .insert("remove",
            CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
                .arg_param(&["name"])
                .completion_cb("name", config::datastore::complete_datastore_name)
                .completion_cb("name", pbs_config::datastore::complete_datastore_name)
        );

    cmd_def.into()

@@ -3,8 +3,9 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use pbs_api_types::JOB_ID_SCHEMA;

use proxmox_backup::api2;

#[api(
    input: {

@@ -83,8 +84,8 @@ pub fn sync_job_commands() -> CommandLineInterface {
            CliCommand::new(&api2::config::sync::API_METHOD_CREATE_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::sync::complete_sync_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("remote", pbs_config::remote::complete_remote_name)
                .completion_cb("remote-store", crate::complete_remote_datastore_name)
        )

@@ -92,8 +93,8 @@ pub fn sync_job_commands() -> CommandLineInterface {
            CliCommand::new(&api2::config::sync::API_METHOD_UPDATE_SYNC_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::sync::complete_sync_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("remote-store", crate::complete_remote_datastore_name)
        )
        .insert("remove",

@@ -5,9 +5,9 @@ use std::collections::HashMap;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use pbs_api_types::{ACL_PATH_SCHEMA, Authid, Userid};

use proxmox_backup::api2;
use proxmox_backup::api2::types::{ACL_PATH_SCHEMA, Authid, Userid};

fn render_expire(value: &Value, _record: &Value) -> Result<String, Error> {
    let never = String::from("never");

@@ -213,7 +213,7 @@ pub fn user_commands() -> CommandLineInterface {
            CliCommand::new(&&API_METHOD_LIST_PERMISSIONS)
                .arg_param(&["auth-id"])
                .completion_cb("auth-id", pbs_config::user::complete_authid)
                .completion_cb("path", config::datastore::complete_acl_path)
                .completion_cb("path", pbs_config::datastore::complete_acl_path)
        );

    cmd_def.into()

@@ -3,8 +3,9 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use pbs_api_types::JOB_ID_SCHEMA;

use proxmox_backup::api2;

#[api(
    input: {

@@ -83,15 +84,15 @@ pub fn verify_job_commands() -> CommandLineInterface {
            CliCommand::new(&api2::config::verify::API_METHOD_CREATE_VERIFICATION_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::verify::complete_verification_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
        )
        .insert("update",
            CliCommand::new(&api2::config::verify::API_METHOD_UPDATE_VERIFICATION_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::verify::complete_verification_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("remote-store", crate::complete_remote_datastore_name)
        )
        .insert("remove",

@@ -6,10 +6,7 @@ use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use pbs_api_types::JOB_ID_SCHEMA;
use pbs_client::{connect_to_localhost, view_task_result};

use proxmox_backup::{
    config,
    api2,
};
use proxmox_backup::api2;

#[api(
    input: {

@@ -121,8 +118,8 @@ pub fn backup_job_commands() -> CommandLineInterface {
            CliCommand::new(&api2::config::tape_backup_job::API_METHOD_CREATE_TAPE_BACKUP_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::tape_job::complete_tape_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("pool", pbs_config::media_pool::complete_pool_name)
                .completion_cb("drive", crate::complete_drive_name)
        )

@@ -130,8 +127,8 @@ pub fn backup_job_commands() -> CommandLineInterface {
            CliCommand::new(&api2::config::tape_backup_job::API_METHOD_UPDATE_TAPE_BACKUP_JOB)
                .arg_param(&["id"])
                .completion_cb("id", pbs_config::tape_job::complete_tape_job_id)
                .completion_cb("schedule", config::datastore::complete_calendar_event)
                .completion_cb("store", config::datastore::complete_datastore_name)
                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
                .completion_cb("pool", pbs_config::media_pool::complete_pool_name)
                .completion_cb("drive", crate::complete_drive_name)
        )
@@ -1,201 +0,0 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::collections::HashMap;
use serde::{Serialize, Deserialize};

use proxmox::api::{
    api,
    schema::{ApiType, Schema, StringSchema, Updater},
    section_config::{
        SectionConfig,
        SectionConfigData,
        SectionConfigPlugin,
    }
};

use pbs_config::{open_backup_lockfile, BackupLockGuard};

use crate::api2::types::*;

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}

// fixme: define better schemas
pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema();

#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
        },
        "keep-hourly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
        },
        "keep-daily": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_DAILY,
        },
        "keep-weekly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
        },
        "keep-monthly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
        },
        "keep-yearly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
    }
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    #[updater(skip)]
    pub name: String,
    #[updater(skip)]
    pub path: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub gc_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub prune_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_yearly: Option<u64>,
    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if="Option::is_none")]
    pub verify_new: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify_user: Option<Userid>,
    /// Send notification only for job errors
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify: Option<String>,
}

fn init() -> SectionConfig {
    let obj_schema = match DataStoreConfig::API_SCHEMA {
        Schema::Object(ref obj_schema) => obj_schema,
        _ => unreachable!(),
    };

    let plugin = SectionConfigPlugin::new("datastore".to_string(), Some(String::from("name")), obj_schema);
    let mut config = SectionConfig::new(&DATASTORE_SCHEMA);
    config.register_plugin(plugin);

    config
}

pub const DATASTORE_CFG_FILENAME: &str = "/etc/proxmox-backup/datastore.cfg";
pub const DATASTORE_CFG_LOCKFILE: &str = "/etc/proxmox-backup/.datastore.lck";

/// Get exclusive lock
pub fn lock_config() -> Result<BackupLockGuard, Error> {
    open_backup_lockfile(DATASTORE_CFG_LOCKFILE, None, true)
}

pub fn config() -> Result<(SectionConfigData, [u8;32]), Error> {
    let content = proxmox::tools::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
        .unwrap_or_else(|| "".to_string());

    let digest = openssl::sha::sha256(content.as_bytes());
    let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;
    Ok((data, digest))
}

pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
    let raw = CONFIG.write(DATASTORE_CFG_FILENAME, &config)?;
    pbs_config::replace_backup_config(DATASTORE_CFG_FILENAME, raw.as_bytes())
}

// shell completion helper
pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    match config() {
        Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
        Err(_) => return vec![],
    }
}

pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    let mut list = Vec::new();

    list.push(String::from("/"));
    list.push(String::from("/datastore"));
    list.push(String::from("/datastore/"));

    if let Ok((data, _digest)) = config() {
        for id in data.sections.keys() {
            list.push(format!("/datastore/{}", id));
        }
    }

    list.push(String::from("/remote"));
    list.push(String::from("/remote/"));

    list.push(String::from("/tape"));
    list.push(String::from("/tape/"));
    list.push(String::from("/tape/drive"));
    list.push(String::from("/tape/drive/"));
    list.push(String::from("/tape/changer"));
    list.push(String::from("/tape/changer/"));
    list.push(String::from("/tape/pool"));
    list.push(String::from("/tape/pool/"));
    list.push(String::from("/tape/job"));
    list.push(String::from("/tape/job/"));

    list
}

pub fn complete_calendar_event(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    // just give some hints about possible values
    ["minutely", "hourly", "daily", "mon..fri", "0:0"]
        .iter().map(|s| String::from(*s)).collect()
}

@@ -15,7 +15,6 @@ use proxmox::try_block;
use pbs_buildcfg::{self, configdir};

pub mod acme;
pub mod datastore;
pub mod node;
pub mod tfa;

@@ -11,11 +11,7 @@ use pbs_tools::format::HumanByte;
use pbs_api_types::{
    User, TapeBackupJobSetup, SyncJobConfig, VerificationJobConfig,
    APTUpdateInfo, GarbageCollectionStatus,
    Userid, Notify, DatastoreNotify,
};

use crate::{
    config::datastore::DataStoreConfig,
    Userid, Notify, DatastoreNotify, DataStoreConfig,
};

const GC_OK_TEMPLATE: &str = r###"

@@ -566,7 +562,7 @@ pub fn lookup_datastore_notify_settings(
    let notify = DatastoreNotify { gc: None, verify: None, sync: None };

    let (config, _digest) = match crate::config::datastore::config() {
    let (config, _digest) = match pbs_config::datastore::config() {
        Ok(result) => result,
        Err(_) => return (email, notify),
    };

@@ -1,8 +1,6 @@
use std::path::Path;
use std::process::Command;

use crate::config::datastore;

fn files() -> Vec<&'static str> {
    vec![
        "/etc/hostname",

@@ -35,7 +33,7 @@ type FunctionMapping = (&'static str, fn() -> String);
fn function_calls() -> Vec<FunctionMapping> {
    vec![
        ("Datastores", || {
            let config = match datastore::config() {
            let config = match pbs_config::datastore::config() {
                Ok((config, _digest)) => config,
                _ => return String::from("could not read datastore config"),
            };