api: rustfmt

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht committed 2022-04-14 13:33:01 +02:00
parent 35f151e010
commit dc7a5b3491
53 changed files with 2703 additions and 1864 deletions
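This commit is a pure formatting pass over the API modules: the diff below is rustfmt output, with no intended functional change (presumably a plain `cargo fmt` run with the default configuration; the exact invocation is not recorded in the commit message). The patterns that repeat throughout are: import blocks merged and sorted alphabetically, single-line `if` bodies and match arms expanded into braced blocks, and over-long calls split with one argument per line. A minimal before/after sketch of the dominant pattern, assuming rustfmt defaults (the fields mirror the update handlers seen below):

// Before: compact single-line body, which rustfmt's default style rewrites.
if update.comment.is_some() { data.comment = update.comment; }

// After `cargo fmt`: the body moves into its own braced, indented block.
if update.comment.is_some() {
    data.comment = update.comment;
}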

View File

@@ -1,15 +1,12 @@
use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};
use proxmox_sys::sortable;
pub mod tfa;
pub mod openid;
pub mod tfa;
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
("openid", &openid::ROUTER),
("tfa", &tfa::ROUTER),
]);
const SUBDIRS: SubdirMap = &sorted!([("openid", &openid::ROUTER), ("tfa", &tfa::ROUTER),]);
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))

View File

@@ -1,16 +1,15 @@
/// Configure OpenId realms
use anyhow::Error;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
/// Configure OpenId realms
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
OpenIdRealmConfig, OpenIdRealmConfigUpdater,
PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_REALM_ALLOCATE,
OpenIdRealmConfig, OpenIdRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA,
};
use pbs_config::domains;
@@ -33,7 +32,6 @@ pub fn list_openid_realms(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<OpenIdRealmConfig>, Error> {
let (config, digest) = domains::config()?;
let list = config.convert_to_typed_array("openid")?;
@@ -59,14 +57,13 @@ pub fn list_openid_realms(
)]
/// Create a new OpenId realm
pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
let _lock = domains::lock_config()?;
let (mut domains, _digest) = domains::config()?;
if config.realm == "pbs" ||
config.realm == "pam" ||
domains.sections.get(&config.realm).is_some()
if config.realm == "pbs"
|| config.realm == "pam"
|| domains.sections.get(&config.realm).is_some()
{
param_bail!("realm", "realm '{}' already exists.", config.realm);
}
@@ -101,7 +98,6 @@ pub fn delete_openid_realm(
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
@@ -111,7 +107,7 @@ pub fn delete_openid_realm(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
if domains.sections.remove(&realm).is_none() {
if domains.sections.remove(&realm).is_none() {
http_bail!(NOT_FOUND, "realm '{}' does not exist.", realm);
}
@@ -138,7 +134,6 @@ pub fn read_openid_realm(
realm: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<OpenIdRealmConfig, Error> {
let (domains, digest) = domains::config()?;
let config = domains.lookup("openid", &realm)?;
@@ -150,7 +145,7 @@ pub fn read_openid_realm(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -206,7 +201,6 @@ pub fn update_openid_realm(
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
@@ -221,12 +215,24 @@ pub fn update_openid_realm(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::client_key => { config.client_key = None; },
DeletableProperty::comment => { config.comment = None; },
DeletableProperty::autocreate => { config.autocreate = None; },
DeletableProperty::scopes => { config.scopes = None; },
DeletableProperty::prompt => { config.prompt = None; },
DeletableProperty::acr_values => { config.acr_values = None; },
DeletableProperty::client_key => {
config.client_key = None;
}
DeletableProperty::comment => {
config.comment = None;
}
DeletableProperty::autocreate => {
config.autocreate = None;
}
DeletableProperty::scopes => {
config.scopes = None;
}
DeletableProperty::prompt => {
config.prompt = None;
}
DeletableProperty::acr_values => {
config.acr_values = None;
}
}
}
}
@@ -240,14 +246,28 @@ pub fn update_openid_realm(
}
}
if let Some(issuer_url) = update.issuer_url { config.issuer_url = issuer_url; }
if let Some(client_id) = update.client_id { config.client_id = client_id; }
if let Some(issuer_url) = update.issuer_url {
config.issuer_url = issuer_url;
}
if let Some(client_id) = update.client_id {
config.client_id = client_id;
}
if update.client_key.is_some() { config.client_key = update.client_key; }
if update.autocreate.is_some() { config.autocreate = update.autocreate; }
if update.scopes.is_some() { config.scopes = update.scopes; }
if update.prompt.is_some() { config.prompt = update.prompt; }
if update.acr_values.is_some() { config.acr_values = update.acr_values; }
if update.client_key.is_some() {
config.client_key = update.client_key;
}
if update.autocreate.is_some() {
config.autocreate = update.autocreate;
}
if update.scopes.is_some() {
config.scopes = update.scopes;
}
if update.prompt.is_some() {
config.prompt = update.prompt;
}
if update.acr_values.is_some() {
config.acr_values = update.acr_values;
}
domains.set_data(&realm, "openid", &config)?;

View File

@@ -5,10 +5,10 @@ use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use anyhow::{bail, format_err, Error};
use hex::FromHex;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use hex::FromHex;
use proxmox_router::{
http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,

View File

@@ -1,18 +1,17 @@
use anyhow::Error;
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, ScsiTapeChanger, ScsiTapeChangerUpdater, LtoTapeDrive,
PROXMOX_CONFIG_DIGEST_SCHEMA, CHANGER_NAME_SCHEMA, SLOT_ARRAY_SCHEMA,
PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
Authid, LtoTapeDrive, ScsiTapeChanger, ScsiTapeChangerUpdater, CHANGER_NAME_SCHEMA,
PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, SLOT_ARRAY_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
use pbs_tape::linux_list_drives::{check_drive_path, linux_tape_changer_list};
#[api(
protected: true,
@@ -30,7 +29,6 @@ use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
)]
/// Create a new changer device
pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut section_config, _digest) = pbs_config::drive::config()?;
@@ -47,7 +45,12 @@ pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
}
if changer.path == config.path {
param_bail!("path", "Path '{}' already in use by '{}'", config.path, changer.name);
param_bail!(
"path",
"Path '{}' already in use by '{}'",
config.path,
changer.name
);
}
}
@@ -79,7 +82,6 @@ pub fn get_config(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ScsiTapeChanger, Error> {
let (config, digest) = pbs_config::drive::config()?;
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
@@ -176,7 +178,6 @@ pub fn update_changer(
digest: Option<String>,
_param: Value,
) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut config, expected_digest) = pbs_config::drive::config()?;
@@ -244,7 +245,6 @@ pub fn update_changer(
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut config, _digest) = pbs_config::drive::config()?;
@@ -252,18 +252,31 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "changer" {
param_bail!("name", "Entry '{}' exists, but is not a changer device", name);
param_bail!(
"name",
"Entry '{}' exists, but is not a changer device",
name
);
}
config.sections.remove(&name);
},
None => http_bail!(NOT_FOUND, "Delete changer '{}' failed - no such entry", name),
}
None => http_bail!(
NOT_FOUND,
"Delete changer '{}' failed - no such entry",
name
),
}
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
for drive in drive_list {
if let Some(changer) = drive.changer {
if changer == name {
param_bail!("name", "Delete changer '{}' failed - used by drive '{}'", name, drive.name);
param_bail!(
"name",
"Delete changer '{}' failed - used by drive '{}'",
name,
drive.name
);
}
}
}
@@ -278,7 +291,6 @@ const ITEM_ROUTER: Router = Router::new()
.put(&API_METHOD_UPDATE_CHANGER)
.delete(&API_METHOD_DELETE_CHANGER);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_CHANGERS)
.post(&API_METHOD_CREATE_CHANGER)

View File

@@ -1,31 +1,27 @@
use std::path::PathBuf;
use anyhow::Error;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, param_bail, ApiType};
use proxmox_section_config::SectionConfigData;
use proxmox_sys::WorkerTaskContext;
use pbs_datastore::chunk_store::ChunkStore;
use pbs_config::BackupLockGuard;
use pbs_api_types::{
Authid, DatastoreNotify,
DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DATASTORE_SCHEMA,
PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
DataStoreConfig, DataStoreConfigUpdater,
PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::BackupLockGuard;
use pbs_datastore::chunk_store::ChunkStore;
use crate::api2::admin::{sync::list_sync_jobs, verify::list_verification_jobs};
use crate::api2::config::sync::delete_sync_job;
use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
use crate::api2::config::verify::delete_verification_job;
use crate::api2::config::tape_backup_job::{list_tape_backup_jobs, delete_tape_backup_job};
use crate::api2::admin::{
sync::list_sync_jobs,
verify::list_verification_jobs,
};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;
@@ -50,7 +46,6 @@ pub fn list_datastores(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {
let (config, digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -58,7 +53,7 @@ pub fn list_datastores(
rpcenv["digest"] = hex::encode(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
@@ -76,7 +71,13 @@ pub(crate) fn do_create_datastore(
let path: PathBuf = datastore.path.clone().into();
let backup_user = pbs_config::backup_user()?;
let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid, worker)?;
let _store = ChunkStore::create(
&datastore.name,
path,
backup_user.uid,
backup_user.gid,
worker,
)?;
config.set_data(&datastore.name, "datastore", &datastore)?;
@@ -107,7 +108,6 @@ pub fn create_datastore(
config: DataStoreConfig,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let lock = pbs_config::datastore::lock_config()?;
let (section_config, _digest) = pbs_config::datastore::config()?;
@@ -124,7 +124,7 @@ pub fn create_datastore(
Some(config.name.to_string()),
auth_id.to_string(),
to_stdout,
move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
)
}
@@ -156,7 +156,7 @@ pub fn read_datastore(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -226,7 +226,6 @@ pub fn update_datastore(
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = pbs_config::datastore::lock_config()?;
// pass/compare digest
@@ -239,23 +238,51 @@ pub fn update_datastore(
let mut data: DataStoreConfig = config.lookup("datastore", &name)?;
if let Some(delete) = delete {
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
DeletableProperty::keep_last => { data.keep_last = None; },
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
DeletableProperty::keep_daily => { data.keep_daily = None; },
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
DeletableProperty::verify_new => { data.verify_new = None; },
DeletableProperty::notify => { data.notify = None; },
DeletableProperty::notify_user => { data.notify_user = None; },
DeletableProperty::tuning => { data.tuning = None; },
DeletableProperty::maintenance_mode => { data.maintenance_mode = None; },
DeletableProperty::comment => {
data.comment = None;
}
DeletableProperty::gc_schedule => {
data.gc_schedule = None;
}
DeletableProperty::prune_schedule => {
data.prune_schedule = None;
}
DeletableProperty::keep_last => {
data.keep_last = None;
}
DeletableProperty::keep_hourly => {
data.keep_hourly = None;
}
DeletableProperty::keep_daily => {
data.keep_daily = None;
}
DeletableProperty::keep_weekly => {
data.keep_weekly = None;
}
DeletableProperty::keep_monthly => {
data.keep_monthly = None;
}
DeletableProperty::keep_yearly => {
data.keep_yearly = None;
}
DeletableProperty::verify_new => {
data.verify_new = None;
}
DeletableProperty::notify => {
data.notify = None;
}
DeletableProperty::notify_user => {
data.notify_user = None;
}
DeletableProperty::tuning => {
data.tuning = None;
}
DeletableProperty::maintenance_mode => {
data.maintenance_mode = None;
}
}
}
}
@@ -281,29 +308,54 @@ pub fn update_datastore(
data.prune_schedule = update.prune_schedule;
}
if update.keep_last.is_some() { data.keep_last = update.keep_last; }
if update.keep_hourly.is_some() { data.keep_hourly = update.keep_hourly; }
if update.keep_daily.is_some() { data.keep_daily = update.keep_daily; }
if update.keep_weekly.is_some() { data.keep_weekly = update.keep_weekly; }
if update.keep_monthly.is_some() { data.keep_monthly = update.keep_monthly; }
if update.keep_yearly.is_some() { data.keep_yearly = update.keep_yearly; }
if update.keep_last.is_some() {
data.keep_last = update.keep_last;
}
if update.keep_hourly.is_some() {
data.keep_hourly = update.keep_hourly;
}
if update.keep_daily.is_some() {
data.keep_daily = update.keep_daily;
}
if update.keep_weekly.is_some() {
data.keep_weekly = update.keep_weekly;
}
if update.keep_monthly.is_some() {
data.keep_monthly = update.keep_monthly;
}
if update.keep_yearly.is_some() {
data.keep_yearly = update.keep_yearly;
}
if let Some(notify_str) = update.notify {
let value = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str)?;
let notify: DatastoreNotify = serde_json::from_value(value)?;
if let DatastoreNotify { gc: None, verify: None, sync: None } = notify {
if let DatastoreNotify {
gc: None,
verify: None,
sync: None,
} = notify
{
data.notify = None;
} else {
data.notify = Some(notify_str);
}
}
if update.verify_new.is_some() { data.verify_new = update.verify_new; }
if update.verify_new.is_some() {
data.verify_new = update.verify_new;
}
if update.notify_user.is_some() { data.notify_user = update.notify_user; }
if update.notify_user.is_some() {
data.notify_user = update.notify_user;
}
if update.tuning.is_some() { data.tuning = update.tuning; }
if update.tuning.is_some() {
data.tuning = update.tuning;
}
if update.maintenance_mode.is_some() { data.maintenance_mode = update.maintenance_mode; }
if update.maintenance_mode.is_some() {
data.maintenance_mode = update.maintenance_mode;
}
config.set_data(&name, "datastore", &data)?;
@@ -352,7 +404,6 @@ pub async fn delete_datastore(
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = pbs_config::datastore::lock_config()?;
let (mut config, expected_digest) = pbs_config::datastore::config()?;
@@ -363,7 +414,9 @@ pub async fn delete_datastore(
}
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
Some(_) => {
config.sections.remove(&name);
}
None => http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name),
}
@@ -376,7 +429,10 @@ pub async fn delete_datastore(
}
let tape_jobs = list_tape_backup_jobs(Value::Null, rpcenv)?;
for job_config in tape_jobs.into_iter().filter(|config| config.setup.store == name) {
for job_config in tape_jobs
.into_iter()
.filter(|config| config.setup.store == name)
{
delete_tape_backup_job(job_config.id, None, rpcenv)?;
}
}

View File

@@ -1,18 +1,18 @@
use anyhow::{format_err, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;
use anyhow::{format_err, Error};
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger,
PROXMOX_CONFIG_DIGEST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT,
PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
use pbs_tape::linux_list_drives::{check_drive_path, lto_tape_device_list};
#[api(
protected: true,
@@ -30,7 +30,6 @@ use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
)]
/// Create a new drive
pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut section_config, _digest) = pbs_config::drive::config()?;
@@ -46,7 +45,12 @@ pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
param_bail!("name", "Entry '{}' already exists", config.name);
}
if drive.path == config.path {
param_bail!("path", "Path '{}' already used in drive '{}'", config.path, drive.name);
param_bail!(
"path",
"Path '{}' already used in drive '{}'",
config.path,
drive.name
);
}
}
@@ -78,7 +82,6 @@ pub fn get_config(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LtoTapeDrive, Error> {
let (config, digest) = pbs_config::drive::config()?;
let data: LtoTapeDrive = config.lookup("lto", &name)?;
@@ -176,9 +179,8 @@ pub fn update_drive(
update: LtoTapeDriveUpdater,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
_param: Value,
_param: Value,
) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut config, expected_digest) = pbs_config::drive::config()?;
@@ -196,8 +198,10 @@ pub fn update_drive(
DeletableProperty::changer => {
data.changer = None;
data.changer_drivenum = None;
},
DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
}
DeletableProperty::changer_drivenum => {
data.changer_drivenum = None;
}
}
}
}
@@ -218,7 +222,10 @@ pub fn update_drive(
data.changer_drivenum = None;
} else {
if data.changer.is_none() {
param_bail!("changer", format_err!("Option 'changer-drivenum' requires option 'changer'."));
param_bail!(
"changer",
format_err!("Option 'changer-drivenum' requires option 'changer'.")
);
}
data.changer_drivenum = Some(changer_drivenum);
}
@@ -246,7 +253,6 @@ pub fn update_drive(
)]
/// Delete a drive configuration
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
let _lock = pbs_config::drive::lock()?;
let (mut config, _digest) = pbs_config::drive::config()?;
@@ -254,10 +260,14 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "lto" {
param_bail!("name", "Entry '{}' exists, but is not a lto tape drive", name);
param_bail!(
"name",
"Entry '{}' exists, but is not a lto tape drive",
name
);
}
config.sections.remove(&name);
},
}
None => http_bail!(NOT_FOUND, "Delete drive '{}' failed - no such drive", name),
}
@@ -271,7 +281,6 @@ const ITEM_ROUTER: Router = Router::new()
.put(&API_METHOD_UPDATE_DRIVE)
.delete(&API_METHOD_DELETE_DRIVE);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DRIVES)
.post(&API_METHOD_CREATE_DRIVE)

View File

@@ -1,12 +1,12 @@
use anyhow::Error;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA,
PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA, PRIV_TAPE_AUDIT,
PRIV_TAPE_MODIFY,
};
use pbs_config::CachedUserInfo;
@@ -26,10 +26,7 @@ use pbs_config::CachedUserInfo;
},
)]
/// Create a new media pool
pub fn create_pool(
config: MediaPoolConfig,
) -> Result<(), Error> {
pub fn create_pool(config: MediaPoolConfig) -> Result<(), Error> {
let _lock = pbs_config::media_pool::lock()?;
let (mut section_config, _digest) = pbs_config::media_pool::config()?;
@@ -59,9 +56,7 @@ pub fn create_pool(
},
)]
/// List media pools
pub fn list_pools(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<MediaPoolConfig>, Error> {
pub fn list_pools(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<MediaPoolConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
@@ -69,7 +64,7 @@ pub fn list_pools(
let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;
let list = list
let list = list
.into_iter()
.filter(|pool| {
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
@@ -99,7 +94,6 @@ pub fn list_pools(
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
let (config, _digest) = pbs_config::media_pool::config()?;
let data: MediaPoolConfig = config.lookup("pool", &name)?;
@@ -155,7 +149,6 @@ pub fn update_pool(
update: MediaPoolConfigUpdater,
delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {
let _lock = pbs_config::media_pool::lock()?;
let (mut config, _digest) = pbs_config::media_pool::config()?;
@@ -165,19 +158,37 @@ pub fn update_pool(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::allocation => { data.allocation = None; },
DeletableProperty::retention => { data.retention = None; },
DeletableProperty::template => { data.template = None; },
DeletableProperty::encrypt => { data.encrypt = None; },
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::allocation => {
data.allocation = None;
}
DeletableProperty::retention => {
data.retention = None;
}
DeletableProperty::template => {
data.template = None;
}
DeletableProperty::encrypt => {
data.encrypt = None;
}
DeletableProperty::comment => {
data.comment = None;
}
}
}
}
if update.allocation.is_some() { data.allocation = update.allocation; }
if update.retention.is_some() { data.retention = update.retention; }
if update.template.is_some() { data.template = update.template; }
if update.encrypt.is_some() { data.encrypt = update.encrypt; }
if update.allocation.is_some() {
data.allocation = update.allocation;
}
if update.retention.is_some() {
data.retention = update.retention;
}
if update.template.is_some() {
data.template = update.template;
}
if update.encrypt.is_some() {
data.encrypt = update.encrypt;
}
if let Some(comment) = update.comment {
let comment = comment.trim();
@@ -210,13 +221,14 @@ pub fn update_pool(
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {
let _lock = pbs_config::media_pool::lock()?;
let (mut config, _digest) = pbs_config::media_pool::config()?;
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
Some(_) => {
config.sections.remove(&name);
}
None => http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name),
}
@@ -230,7 +242,6 @@ const ITEM_ROUTER: Router = Router::new()
.put(&API_METHOD_UPDATE_POOL)
.delete(&API_METHOD_DELETE_POOL);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_POOLS)
.post(&API_METHOD_CREATE_POOL)

View File

@@ -1,20 +1,20 @@
//! Backup Server Configuration
use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};
pub mod access;
pub mod acme;
pub mod changer;
pub mod datastore;
pub mod drive;
pub mod media_pool;
pub mod remote;
pub mod sync;
pub mod verify;
pub mod drive;
pub mod changer;
pub mod media_pool;
pub mod tape_encryption_keys;
pub mod tape_backup_job;
pub mod tape_encryption_keys;
pub mod traffic_control;
pub mod verify;
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),

View File

@@ -1,20 +1,20 @@
use anyhow::{bail, format_err, Error};
use proxmox_sys::sortable;
use proxmox_router::SubdirMap;
use proxmox_router::list_subdirs_api_method;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, format_err, Error};
use hex::FromHex;
use proxmox_router::list_subdirs_api_method;
use proxmox_router::SubdirMap;
use proxmox_sys::sortable;
use serde_json::Value;
use proxmox_router::{http_bail, http_err, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, http_err, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_client::{HttpClient, HttpClientOptions};
use pbs_api_types::{
REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA, Remote, RemoteConfig, RemoteConfigUpdater,
Authid, PROXMOX_CONFIG_DIGEST_SCHEMA, DATASTORE_SCHEMA, GroupListItem,
DataStoreListItem, RateLimitConfig, SyncJobConfig, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
Authid, DataStoreListItem, GroupListItem, RateLimitConfig, Remote, RemoteConfig,
RemoteConfigUpdater, SyncJobConfig, DATASTORE_SCHEMA, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA,
};
use pbs_client::{HttpClient, HttpClientOptions};
use pbs_config::sync;
use pbs_config::CachedUserInfo;
@@ -84,12 +84,7 @@ pub fn list_remotes(
},
)]
/// Create new remote.
pub fn create_remote(
name: String,
config: RemoteConfig,
password: String,
) -> Result<(), Error> {
pub fn create_remote(name: String, config: RemoteConfig, password: String) -> Result<(), Error> {
let _lock = pbs_config::remote::lock_config()?;
let (mut section_config, _digest) = pbs_config::remote::config()?;
@@ -98,7 +93,11 @@ pub fn create_remote(
param_bail!("name", "remote '{}' already exists.", name);
}
let remote = Remote { name: name.clone(), config, password };
let remote = Remote {
name: name.clone(),
config,
password,
};
section_config.set_data(&name, "remote", &remote)?;
@@ -188,7 +187,6 @@ pub fn update_remote(
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = pbs_config::remote::lock_config()?;
let (mut config, expected_digest) = pbs_config::remote::config()?;
@@ -203,9 +201,15 @@ pub fn update_remote(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::comment => { data.config.comment = None; },
DeletableProperty::fingerprint => { data.config.fingerprint = None; },
DeletableProperty::port => { data.config.port = None; },
DeletableProperty::comment => {
data.config.comment = None;
}
DeletableProperty::fingerprint => {
data.config.fingerprint = None;
}
DeletableProperty::port => {
data.config.port = None;
}
}
}
}
@@ -218,12 +222,22 @@ pub fn update_remote(
data.config.comment = Some(comment);
}
}
if let Some(host) = update.host { data.config.host = host; }
if update.port.is_some() { data.config.port = update.port; }
if let Some(auth_id) = update.auth_id { data.config.auth_id = auth_id; }
if let Some(password) = password { data.password = password; }
if let Some(host) = update.host {
data.config.host = host;
}
if update.port.is_some() {
data.config.port = update.port;
}
if let Some(auth_id) = update.auth_id {
data.config.auth_id = auth_id;
}
if let Some(password) = password {
data.password = password;
}
if update.fingerprint.is_some() { data.config.fingerprint = update.fingerprint; }
if update.fingerprint.is_some() {
data.config.fingerprint = update.fingerprint;
}
config.set_data(&name, "remote", &data)?;
@@ -251,13 +265,18 @@ pub fn update_remote(
)]
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
let (sync_jobs, _) = sync::config()?;
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
if job.remote == name {
param_bail!("name", "remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
param_bail!(
"name",
"remote '{}' is used by sync job '{}' (datastore '{}')",
name,
job.id,
job.store
);
}
}
@@ -271,7 +290,9 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
}
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
Some(_) => {
config.sections.remove(&name);
}
None => http_bail!(NOT_FOUND, "remote '{}' does not exist.", name),
}
@@ -285,7 +306,10 @@ pub async fn remote_client(
remote: &Remote,
limit: Option<RateLimitConfig>,
) -> Result<HttpClient, Error> {
let mut options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.config.fingerprint.clone());
let mut options = HttpClientOptions::new_non_interactive(
remote.password.clone(),
remote.config.fingerprint.clone(),
);
if let Some(limit) = limit {
options = options.rate_limit(limit);
@@ -295,15 +319,22 @@ pub async fn remote_client(
&remote.config.host,
remote.config.port.unwrap_or(8007),
&remote.config.auth_id,
options)?;
let _auth_info = client.login() // make sure we can auth
options,
)?;
let _auth_info = client
.login() // make sure we can auth
.await
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.config.host, err))?;
.map_err(|err| {
format_err!(
"remote connection to '{}' failed - {}",
remote.config.host,
err
)
})?;
Ok(client)
}
#[api(
input: {
properties: {
@@ -327,15 +358,15 @@ pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListIte
let remote: Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
http_err!(INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err)
http_err!(
INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err
)
};
let client = remote_client(&remote, None)
.await
.map_err(map_remote_err)?;
let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
let api_res = client
.get("api2/json/admin/datastore", None)
.await
@@ -377,15 +408,15 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
let remote: Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
http_err!(INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err)
http_err!(
INTERNAL_SERVER_ERROR,
"failed to scan remote '{}' - {}",
&name,
api_err
)
};
let client = remote_client(&remote, None)
.await
.map_err(map_remote_err)?;
let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
let api_res = client
.get(&format!("api2/json/admin/datastore/{}/groups", store), None)
.await
@@ -402,13 +433,8 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
}
#[sortable]
const DATASTORE_SCAN_SUBDIRS: SubdirMap = &[
(
"groups",
&Router::new()
.get(&API_METHOD_SCAN_REMOTE_GROUPS)
),
];
const DATASTORE_SCAN_SUBDIRS: SubdirMap =
&[("groups", &Router::new().get(&API_METHOD_SCAN_REMOTE_GROUPS))];
const DATASTORE_SCAN_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(DATASTORE_SCAN_SUBDIRS))

View File

@@ -1,15 +1,15 @@
use anyhow::{bail, Error};
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ,
Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::sync;
@@ -49,10 +49,8 @@ pub fn check_sync_job_modify_access(
let correct_owner = match job.owner {
Some(ref owner) => {
owner == auth_id
|| (owner.is_token()
&& !auth_id.is_token()
&& owner.user() == auth_id.user())
},
|| (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
}
// default sync owner
None => auth_id == Authid::root_auth_id(),
};
@@ -98,7 +96,7 @@ pub fn list_sync_jobs(
.into_iter()
.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
.collect();
Ok(list)
Ok(list)
}
#[api(
@@ -181,7 +179,7 @@ pub fn read_sync_job(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -258,18 +256,36 @@ pub fn update_sync_job(
let mut data: SyncJobConfig = config.lookup("sync", &id)?;
if let Some(delete) = delete {
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::owner => { data.owner = None; },
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::schedule => { data.schedule = None; },
DeletableProperty::remove_vanished => { data.remove_vanished = None; },
DeletableProperty::group_filter => { data.group_filter = None; },
DeletableProperty::rate_in => { data.limit.rate_in = None; },
DeletableProperty::rate_out => { data.limit.rate_out = None; },
DeletableProperty::burst_in => { data.limit.burst_in = None; },
DeletableProperty::burst_out => { data.limit.burst_out = None; },
DeletableProperty::owner => {
data.owner = None;
}
DeletableProperty::comment => {
data.comment = None;
}
DeletableProperty::schedule => {
data.schedule = None;
}
DeletableProperty::remove_vanished => {
data.remove_vanished = None;
}
DeletableProperty::group_filter => {
data.group_filter = None;
}
DeletableProperty::rate_in => {
data.limit.rate_in = None;
}
DeletableProperty::rate_out => {
data.limit.rate_out = None;
}
DeletableProperty::burst_in => {
data.limit.burst_in = None;
}
DeletableProperty::burst_out => {
data.limit.burst_out = None;
}
}
}
}
@@ -283,11 +299,21 @@ pub fn update_sync_job(
}
}
if let Some(store) = update.store { data.store = store; }
if let Some(remote) = update.remote { data.remote = remote; }
if let Some(remote_store) = update.remote_store { data.remote_store = remote_store; }
if let Some(owner) = update.owner { data.owner = Some(owner); }
if let Some(group_filter) = update.group_filter { data.group_filter = Some(group_filter); }
if let Some(store) = update.store {
data.store = store;
}
if let Some(remote) = update.remote {
data.remote = remote;
}
if let Some(remote_store) = update.remote_store {
data.remote_store = remote_store;
}
if let Some(owner) = update.owner {
data.owner = Some(owner);
}
if let Some(group_filter) = update.group_filter {
data.group_filter = Some(group_filter);
}
if update.limit.rate_in.is_some() {
data.limit.rate_in = update.limit.rate_in;
@@ -306,8 +332,12 @@ pub fn update_sync_job(
}
let schedule_changed = data.schedule != update.schedule;
if update.schedule.is_some() { data.schedule = update.schedule; }
if update.remove_vanished.is_some() { data.remove_vanished = update.remove_vanished; }
if update.schedule.is_some() {
data.schedule = update.schedule;
}
if update.remove_vanished.is_some() {
data.remove_vanished = update.remove_vanished;
}
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
bail!("permission check failed");
@@ -366,8 +396,10 @@ pub fn delete_sync_job(
bail!("permission check failed");
}
config.sections.remove(&id);
},
Err(_) => { http_bail!(NOT_FOUND, "job '{}' does not exist.", id) },
}
Err(_) => {
http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
}
};
sync::save_config(&config)?;
@@ -387,25 +419,30 @@ pub const ROUTER: Router = Router::new()
.post(&API_METHOD_CREATE_SYNC_JOB)
.match_all("id", &ITEM_ROUTER);
#[test]
fn sync_job_access_test() -> Result<(), Error> {
let (user_cfg, _) = pbs_config::user::test_cfg_from_str(r###"
let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
r###"
user: noperm@pbs
user: read@pbs
user: write@pbs
"###).expect("test user.cfg is not parsable");
let acl_tree = pbs_config::acl::AclTree::from_raw(r###"
"###,
)
.expect("test user.cfg is not parsable");
let acl_tree = pbs_config::acl::AclTree::from_raw(
r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
"###).expect("test acl.cfg is not parsable");
"###,
)
.expect("test acl.cfg is not parsable");
let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);
@@ -429,28 +466,52 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
};
// should work without ACLs
assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);
assert_eq!(
check_sync_job_read_access(&user_info, root_auth_id, &job),
true
);
assert_eq!(
check_sync_job_modify_access(&user_info, root_auth_id, &job),
true
);
// user without permissions must fail
assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
assert_eq!(check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job), false);
assert_eq!(
check_sync_job_read_access(&user_info, &no_perm_auth_id, &job),
false
);
assert_eq!(
check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job),
false
);
// reading without proper read permissions on either remote or local must fail
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_read_access(&user_info, &read_auth_id, &job),
false
);
// reading without proper read permissions on local end must fail
job.remote = "remote1".to_string();
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_read_access(&user_info, &read_auth_id, &job),
false
);
// reading without proper read permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_read_access(&user_info, &read_auth_id, &job),
false
);
// writing without proper write permissions on either end must fail
job.store = "localstore0".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
false
);
// writing without proper write permissions on local end must fail
job.remote = "remote1".to_string();
@@ -458,46 +519,85 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
// writing without proper write permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
false
);
// reset remote to one where users have access
job.remote = "remote1".to_string();
// user with read permission can only read, but not modify/run
assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), true);
assert_eq!(
check_sync_job_read_access(&user_info, &read_auth_id, &job),
true
);
job.owner = Some(read_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &read_auth_id, &job),
false
);
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &read_auth_id, &job),
false
);
job.owner = Some(write_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &read_auth_id, &job),
false
);
// user with simple write permission can modify/run
assert_eq!(check_sync_job_read_access(&user_info, &write_auth_id, &job), true);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
assert_eq!(
check_sync_job_read_access(&user_info, &write_auth_id, &job),
true
);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
true
);
// but can't modify/run with deletion
job.remove_vanished = Some(true);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
false
);
// unless they have Datastore.Prune as well
job.store = "localstore2".to_string();
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
true
);
// changing owner is not possible
job.owner = Some(read_auth_id.clone());
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
false
);
// also not to the default 'root@pam'
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
false
);
// unless they have Datastore.Modify as well
job.store = "localstore3".to_string();
job.owner = Some(read_auth_id);
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
true
);
job.owner = None;
assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
assert_eq!(
check_sync_job_modify_access(&user_info, &write_auth_id, &job),
true
);
Ok(())
}

View File

@@ -1,15 +1,14 @@
use anyhow::Error;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
TrafficControlRule, TrafficControlRuleUpdater,
TrafficControlRule, TrafficControlRuleUpdater, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, TRAFFIC_CONTROL_ID_SCHEMA,
PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
};
#[api(
@@ -56,13 +55,16 @@ pub fn list_traffic_controls(
)]
/// Create new traffic control rule.
pub fn create_traffic_control(config: TrafficControlRule) -> Result<(), Error> {
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut section_config, _digest) = pbs_config::traffic_control::config()?;
if section_config.sections.get(&config.name).is_some() {
param_bail!("name", "traffic control rule '{}' already exists.", config.name);
param_bail!(
"name",
"traffic control rule '{}' already exists.",
config.name
);
}
section_config.set_data(&config.name, "rule", &config)?;
@@ -154,7 +156,6 @@ pub fn update_traffic_control(
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
@@ -169,12 +170,24 @@ pub fn update_traffic_control(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::rate_in => { data.limit.rate_in = None; },
DeletableProperty::rate_out => { data.limit.rate_out = None; },
DeletableProperty::burst_in => { data.limit.burst_in = None; },
DeletableProperty::burst_out => { data.limit.burst_out = None; },
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::timeframe => { data.timeframe = None; },
DeletableProperty::rate_in => {
data.limit.rate_in = None;
}
DeletableProperty::rate_out => {
data.limit.rate_out = None;
}
DeletableProperty::burst_in => {
data.limit.burst_in = None;
}
DeletableProperty::burst_out => {
data.limit.burst_out = None;
}
DeletableProperty::comment => {
data.comment = None;
}
DeletableProperty::timeframe => {
data.timeframe = None;
}
}
}
}
@@ -204,8 +217,12 @@ pub fn update_traffic_control(
data.limit.burst_out = update.limit.burst_out;
}
if let Some(network) = update.network { data.network = network; }
if update.timeframe.is_some() { data.timeframe = update.timeframe; }
if let Some(network) = update.network {
data.network = network;
}
if update.timeframe.is_some() {
data.timeframe = update.timeframe;
}
config.set_data(&name, "rule", &data)?;
@@ -233,7 +250,6 @@ pub fn update_traffic_control(
)]
/// Remove a traffic control rule from the configuration file.
pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<(), Error> {
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
@@ -244,7 +260,9 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
}
match config.sections.get(&name) {
Some(_) => { config.sections.remove(&name); },
Some(_) => {
config.sections.remove(&name);
}
None => http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name),
}
@@ -253,7 +271,6 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
Ok(())
}
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_TRAFFIC_CONTROL)
.put(&API_METHOD_UPDATE_TRAFFIC_CONTROL)

View File

@@ -1,14 +1,14 @@
use anyhow::Error;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, VerificationJobConfig, VerificationJobConfigUpdater, JOB_ID_SCHEMA,
PROXMOX_CONFIG_DIGEST_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::verify;
@@ -42,19 +42,20 @@ pub fn list_verification_jobs(
let list = config.convert_to_typed_array("verification")?;
let list = list.into_iter()
let list = list
.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
privs & required_privs != 00
}).collect();
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
#[api(
protected: true,
input: {
@@ -73,12 +74,17 @@ pub fn list_verification_jobs(
/// Create a new verification job.
pub fn create_verification_job(
config: VerificationJobConfig,
rpcenv: &mut dyn RpcEnvironment
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
user_info.check_privs(&auth_id, &["datastore", &config.store], PRIV_DATASTORE_VERIFY, false)?;
user_info.check_privs(
&auth_id,
&["datastore", &config.store],
PRIV_DATASTORE_VERIFY,
false,
)?;
let _lock = verify::lock_config()?;
@@ -124,7 +130,12 @@ pub fn read_verification_job(
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &verification_job.store],
required_privs,
true,
)?;
rpcenv["digest"] = hex::encode(&digest).into();
@@ -133,7 +144,7 @@ pub fn read_verification_job(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the ignore verified property.
@@ -143,7 +154,7 @@ pub enum DeletableProperty {
/// Delete the job schedule.
Schedule,
/// Delete outdated after property.
OutdatedAfter
OutdatedAfter,
}
#[api(
@@ -201,15 +212,28 @@ pub fn update_verification_job(
let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
// check existing store
user_info.check_privs(&auth_id, &["datastore", &data.store], PRIV_DATASTORE_VERIFY, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &data.store],
PRIV_DATASTORE_VERIFY,
true,
)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
DeletableProperty::OutdatedAfter => { data.outdated_after = None; },
DeletableProperty::Comment => { data.comment = None; },
DeletableProperty::Schedule => { data.schedule = None; },
DeletableProperty::IgnoreVerified => {
data.ignore_verified = None;
}
DeletableProperty::OutdatedAfter => {
data.outdated_after = None;
}
DeletableProperty::Comment => {
data.comment = None;
}
DeletableProperty::Schedule => {
data.schedule = None;
}
}
}
}
@@ -225,15 +249,25 @@ pub fn update_verification_job(
if let Some(store) = update.store {
// check new store
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_VERIFY, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &store],
PRIV_DATASTORE_VERIFY,
true,
)?;
data.store = store;
}
if update.ignore_verified.is_some() { data.ignore_verified = update.ignore_verified; }
if update.outdated_after.is_some() { data.outdated_after = update.outdated_after; }
if update.ignore_verified.is_some() {
data.ignore_verified = update.ignore_verified;
}
if update.outdated_after.is_some() {
data.outdated_after = update.outdated_after;
}
let schedule_changed = data.schedule != update.schedule;
if update.schedule.is_some() { data.schedule = update.schedule; }
if update.schedule.is_some() {
data.schedule = update.schedule;
}
config.set_data(&id, "verification", &data)?;
@@ -278,7 +312,12 @@ pub fn delete_verification_job(
let (mut config, expected_digest) = verify::config()?;
let job: VerificationJobConfig = config.lookup("verification", &id)?;
user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
user_info.check_privs(
&auth_id,
&["datastore", &job.store],
PRIV_DATASTORE_VERIFY,
true,
)?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
@@ -286,7 +325,9 @@ pub fn delete_verification_job(
}
match config.sections.get(&id) {
Some(_) => { config.sections.remove(&id); },
Some(_) => {
config.sections.remove(&id);
}
None => http_bail!(NOT_FOUND, "job '{}' does not exist.", id),
}