api: rustfmt

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Author: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date:   2022-04-14 13:33:01 +02:00
parent 35f151e010
commit dc7a5b3491

53 changed files with 2703 additions and 1864 deletions
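Every hunk below is a mechanical `rustfmt` pass (default style, 100-column `max_width`) over the API code; no behavior changes. Two rewrites recur throughout: `use` items get sorted alphabetically, and inline `if`/`else` expressions longer than rustfmt's single-line limit (`single_line_if_else_max_width`, 50 columns by default) are expanded into block form. A minimal, self-contained sketch of the second rewrite (names hypothetical, not from this commit):

    // before rustfmt:
    //   let p = if path.is_empty() { String::from("/") } else { path.to_string() };
    // after `cargo fmt`, the expression exceeds the 50-column single-line limit:
    fn display_path(path: &str) -> String {
        if path.is_empty() {
            String::from("/")
        } else {
            path.to_string()
        }
    }

    fn main() {
        assert_eq!(display_path(""), "/");
        assert_eq!(display_path("datastore"), "datastore");
    }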


@@ -3,13 +3,12 @@
 use anyhow::{bail, Error};
 use hex::FromHex;
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 use pbs_api_types::{
-    Authid, AclListItem, Role,
-    ACL_PATH_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
-    ACL_PROPAGATE_SCHEMA, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+    AclListItem, Authid, Role, ACL_PATH_SCHEMA, ACL_PROPAGATE_SCHEMA, PRIV_PERMISSIONS_MODIFY,
+    PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
 };
 use pbs_config::acl::AclTreeNode;
@@ -32,15 +31,18 @@ fn extract_acl_node_data(
     for (user, roles) in &node.users {
         if let Some(auth_id_filter) = auth_id_filter {
-            if !user.is_token()
-                || user.user() != auth_id_filter.user() {
-                    continue;
+            if !user.is_token() || user.user() != auth_id_filter.user() {
+                continue;
             }
         }
         for (role, propagate) in roles {
             list.push(AclListItem {
-                path: if path.is_empty() { String::from("/") } else { path.to_string() },
+                path: if path.is_empty() {
+                    String::from("/")
+                } else {
+                    path.to_string()
+                },
                 propagate: *propagate,
                 ugid_type: String::from("user"),
                 ugid: user.to_string(),
@@ -55,7 +57,11 @@ fn extract_acl_node_data(
         for (role, propagate) in roles {
             list.push(AclListItem {
-                path: if path.is_empty() { String::from("/") } else { path.to_string() },
+                path: if path.is_empty() {
+                    String::from("/")
+                } else {
+                    path.to_string()
+                },
                 propagate: *propagate,
                 ugid_type: String::from("group"),
                 ugid: group.to_string(),
@@ -201,8 +207,10 @@ pub fn update_acl(
                 } else if auth_id.user() != current_auth_id.user() {
                     bail!("Unprivileged users can only set ACL items for their own API tokens.");
                 }
-            },
-            None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
+            }
+            None => {
+                bail!("Unprivileged user needs to provide auth_id to update ACL item.");
+            }
         };
     }
@@ -222,18 +230,26 @@ pub fn update_acl(
     if let Some(ref _group) = group {
         bail!("parameter 'group' - groups are currently not supported.");
     } else if let Some(ref auth_id) = auth_id {
-        if !delete { // Note: we allow to delete non-existent users
+        if !delete {
+            // Note: we allow to delete non-existent users
            let user_cfg = pbs_config::user::cached_config()?;
            if user_cfg.sections.get(&auth_id.to_string()).is_none() {
-                bail!(format!("no such {}.",
-                    if auth_id.is_token() { "API token" } else { "user" }));
+                bail!(format!(
+                    "no such {}.",
+                    if auth_id.is_token() {
+                        "API token"
+                    } else {
+                        "user"
+                    }
+                ));
            }
        }
     } else {
         bail!("missing 'userid' or 'group' parameter.");
     }
-    if !delete { // Note: we allow to delete entries with invalid path
+    if !delete {
+        // Note: we allow to delete entries with invalid path
         pbs_config::acl::check_acl_path(&path)?;
     }


@@ -1,9 +1,9 @@
 //! List Authentication domains/realms
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 use pbs_api_types::BasicRealmInfo;
@@ -50,5 +50,4 @@ fn list_domains(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInf
     Ok(list)
 }
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_DOMAINS);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_DOMAINS);
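The `ROUTER` change above is the inverse rule: a builder chain whose whole expression fits within `max_width` collapses onto one line. A sketch with a hypothetical stand-in type (not the real proxmox_router API):

    struct Router {
        handler: Option<&'static str>,
    }

    impl Router {
        const fn new() -> Self {
            Router { handler: None }
        }
        const fn get(self, handler: &'static str) -> Self {
            Router {
                handler: Some(handler),
            }
        }
    }

    // before: Router::new()
    //             .get("list_domains");
    // after rustfmt, the whole expression fits on one line:
    const ROUTER: Router = Router::new().get("list_domains");

    fn main() {
        assert_eq!(ROUTER.handler, Some("list_domains"));
    }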


@@ -6,19 +6,19 @@ use serde_json::{json, Value};
 use std::collections::HashMap;
 use std::collections::HashSet;
-use proxmox_sys::sortable;
 use proxmox_router::{
-    http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+    http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 use pbs_api_types::{
-    Userid, Authid, PASSWORD_SCHEMA, ACL_PATH_SCHEMA,
-    PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+    Authid, Userid, ACL_PATH_SCHEMA, PASSWORD_SCHEMA, PRIVILEGES, PRIV_PERMISSIONS_MODIFY,
+    PRIV_SYS_AUDIT,
 };
-use pbs_tools::ticket::{self, Empty, Ticket};
 use pbs_config::acl::AclTreeNode;
 use pbs_config::CachedUserInfo;
+use pbs_tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::config::tfa::TfaChallenge;
@@ -193,10 +193,11 @@ pub fn create_ticket(
     tfa_challenge: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     use proxmox_rest_server::RestEnvironment;
-    let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+    let env: &RestEnvironment = rpcenv
+        .as_any()
+        .downcast_ref::<RestEnvironment>()
         .ok_or_else(|| format_err!("detected worng RpcEnvironment type"))?;
     match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
@@ -340,7 +341,7 @@ pub fn list_permissions(
             } else {
                 bail!("not allowed to list permissions of {}", auth_id);
             }
-        },
+        }
         None => current_auth_id,
     };
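The `create_ticket` hunk shows the method-chain rule: once a chain exceeds the line width, every link moves to its own line. Note that rustfmt only reflows; it never edits string contents, which is why the pre-existing "worng" typo in the error message survives the pass untouched. A std-only sketch (hypothetical function, not the PBS code):

    use std::any::Any;

    fn downcast_to_string(value: &dyn Any) -> Result<String, String> {
        // before: the first links shared one over-long line; after rustfmt,
        // each link in the chain starts a new line:
        let text: &String = value
            .downcast_ref::<String>()
            .ok_or_else(|| String::from("detected wrong type"))?;
        Ok(text.clone())
    }

    fn main() {
        let value: Box<dyn Any> = Box::new(String::from("ticket"));
        assert_eq!(downcast_to_string(value.as_ref()).unwrap(), "ticket");
    }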


@@ -4,31 +4,35 @@ use std::convert::TryFrom;
 use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};
-use proxmox_sys::sortable;
 use proxmox_router::{
-    http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+    http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 use proxmox_openid::{OpenIdAuthenticator, OpenIdConfig};
 use pbs_api_types::{
-    OpenIdRealmConfig, User, Userid,
-    EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, OPENID_DEFAILT_SCOPE_LIST,
-    REALM_ID_SCHEMA,
+    OpenIdRealmConfig, User, Userid, EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
+    OPENID_DEFAILT_SCOPE_LIST, REALM_ID_SCHEMA,
 };
 use pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M;
 use pbs_tools::ticket::Ticket;
-use pbs_config::CachedUserInfo;
 use pbs_config::open_backup_lockfile;
+use pbs_config::CachedUserInfo;
 use crate::auth_helpers::*;
 use crate::server::ticket::ApiTicket;
-fn openid_authenticator(realm_config: &OpenIdRealmConfig, redirect_url: &str) -> Result<OpenIdAuthenticator, Error> {
-
-    let scopes: Vec<String> = realm_config.scopes.as_deref().unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
+fn openid_authenticator(
+    realm_config: &OpenIdRealmConfig,
+    redirect_url: &str,
+) -> Result<OpenIdAuthenticator, Error> {
+    let scopes: Vec<String> = realm_config
+        .scopes
+        .as_deref()
+        .unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
         .split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
         .filter(|s| !s.is_empty())
         .map(String::from)
@@ -37,11 +41,10 @@ fn openid_authenticator(realm_config: &OpenIdRealmConfig, redirect_url: &str) ->
     let mut acr_values = None;
     if let Some(ref list) = realm_config.acr_values {
         acr_values = Some(
-            list
-                .split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
+            list.split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
                 .filter(|s| !s.is_empty())
                 .map(String::from)
-                .collect()
+                .collect(),
         );
     }
@@ -105,7 +108,9 @@ pub fn openid_login(
 ) -> Result<Value, Error> {
     use proxmox_rest_server::RestEnvironment;
-    let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+    let env: &RestEnvironment = rpcenv
+        .as_any()
+        .downcast_ref::<RestEnvironment>()
         .ok_or_else(|| format_err!("detected worng RpcEnvironment type"))?;
     let user_info = CachedUserInfo::new()?;
@@ -113,7 +118,6 @@ pub fn openid_login(
     let mut tested_username = None;
     let result = proxmox_lang::try_block!({
-
         let (realm, private_auth_state) =
             OpenIdAuthenticator::verify_public_auth_state(PROXMOX_BACKUP_RUN_DIR_M!(), &state)?;
@@ -157,13 +161,19 @@ pub fn openid_login(
         use pbs_config::user;
         let _lock = open_backup_lockfile(user::USER_CFG_LOCKFILE, None, true)?;
-        let firstname = info["given_name"].as_str().map(|n| n.to_string())
+        let firstname = info["given_name"]
+            .as_str()
+            .map(|n| n.to_string())
             .filter(|n| FIRST_NAME_SCHEMA.parse_simple_value(n).is_ok());
-        let lastname = info["family_name"].as_str().map(|n| n.to_string())
+        let lastname = info["family_name"]
+            .as_str()
+            .map(|n| n.to_string())
             .filter(|n| LAST_NAME_SCHEMA.parse_simple_value(n).is_ok());
-        let email = info["email"].as_str().map(|n| n.to_string())
+        let email = info["email"]
+            .as_str()
+            .map(|n| n.to_string())
             .filter(|n| EMAIL_SCHEMA.parse_simple_value(n).is_ok());
         let user = User {
@@ -206,7 +216,7 @@ pub fn openid_login(
     if let Err(ref err) = result {
         let msg = err.to_string();
         env.log_failed_auth(tested_username, &msg);
-        return Err(http_err!(UNAUTHORIZED, "{}", msg))
+        return Err(http_err!(UNAUTHORIZED, "{}", msg));
     }
     result
@@ -240,7 +250,6 @@ fn openid_auth_url(
     redirect_url: String,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let (domains, _digest) = pbs_config::domains::config()?;
     let config: OpenIdRealmConfig = domains.lookup("openid", &realm)?;
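`openid_authenticator` above illustrates the signature rule: when a function header no longer fits on one line (the old header was well past 100 columns), rustfmt puts one parameter per line with a trailing comma and closes the parenthesis on its own line. A hedged, compilable sketch with hypothetical types; the split/filter/map chain mirrors the scope handling in the hunk:

    struct RealmConfig {
        scopes: Option<String>,
    }

    // rustfmt would only wrap this header once it exceeds 100 columns;
    // it is shown pre-wrapped here to match the style of the hunk above.
    fn scope_list(
        realm_config: &RealmConfig,
        default_scopes: &str,
    ) -> Vec<String> {
        realm_config
            .scopes
            .as_deref()
            .unwrap_or(default_scopes)
            .split(|c: char| c == ',' || c == ';' || c.is_ascii_whitespace())
            .filter(|s| !s.is_empty())
            .map(String::from)
            .collect()
    }

    fn main() {
        let cfg = RealmConfig { scopes: None };
        assert_eq!(scope_list(&cfg, "email profile"), ["email", "profile"]);
    }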


@@ -7,7 +7,7 @@ use serde_json::{json, Value};
 use proxmox_router::{Permission, Router};
 use proxmox_schema::api;
-use pbs_api_types::{Role, SINGLE_LINE_COMMENT_SCHEMA, PRIVILEGES};
+use pbs_api_types::{Role, PRIVILEGES, SINGLE_LINE_COMMENT_SCHEMA};
 use pbs_config::acl::ROLE_NAMES;
 #[api(
@@ -56,5 +56,4 @@ fn list_roles() -> Result<Value, Error> {
     Ok(list.into())
 }
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_ROLES);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_ROLES);


@@ -1,19 +1,18 @@
 //! User Management
 use anyhow::{bail, format_err, Error};
-use serde::{Serialize, Deserialize};
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 use std::collections::HashMap;
-use hex::FromHex;
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, SubdirMap, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
 use proxmox_schema::api;
 use pbs_api_types::{
-    PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, Authid,
-    Tokenname, UserWithTokens, Userid, User, UserUpdater, ApiToken,
-    ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+    ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
+    EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+    PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
 };
 use pbs_config::token_shadow;
@@ -59,7 +58,6 @@ pub fn list_users(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<UserWithTokens>, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
     let auth_id: Authid = rpcenv
@@ -74,41 +72,34 @@ pub fn list_users(
     let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
     let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
-    let filter_by_privs = |user: &User| {
-        top_level_allowed || user.userid == *userid
-    };
-    let list:Vec<User> = config.convert_to_typed_array("user")?;
+    let filter_by_privs = |user: &User| top_level_allowed || user.userid == *userid;
+    let list: Vec<User> = config.convert_to_typed_array("user")?;
     rpcenv["digest"] = hex::encode(&digest).into();
     let iter = list.into_iter().filter(filter_by_privs);
     let list = if include_tokens {
         let tokens: Vec<ApiToken> = config.convert_to_typed_array("token")?;
-        let mut user_to_tokens = tokens
-            .into_iter()
-            .fold(
-                HashMap::new(),
-                |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
+        let mut user_to_tokens = tokens.into_iter().fold(
+            HashMap::new(),
+            |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
                 if token.tokenid.is_token() {
-                    map
-                        .entry(token.tokenid.user().clone())
+                    map.entry(token.tokenid.user().clone())
                         .or_default()
                         .push(token);
                 }
                 map
-            });
-        iter
-            .map(|user: User| {
+            },
+        );
+        iter.map(|user: User| {
             let mut user = new_user_with_tokens(user);
            user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
            user
        })
        .collect()
     } else {
-        iter.map(new_user_with_tokens)
-            .collect()
+        iter.map(new_user_with_tokens).collect()
     };
     Ok(list)
@@ -136,14 +127,17 @@ pub fn list_users(
 pub fn create_user(
     password: Option<String>,
     config: User,
-    rpcenv: &mut dyn RpcEnvironment
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let _lock = pbs_config::user::lock_config()?;
     let (mut section_config, _digest) = pbs_config::user::config()?;
-    if section_config.sections.get(config.userid.as_str()).is_some() {
+    if section_config
+        .sections
+        .get(config.userid.as_str())
+        .is_some()
+    {
         bail!("user '{}' already exists.", config.userid);
     }
@@ -194,7 +188,7 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 pub enum DeletableProperty {
     /// Delete the comment property.
@@ -253,7 +247,6 @@ pub fn update_user(
     digest: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -306,11 +299,19 @@ pub fn update_user(
     }
     if let Some(firstname) = update.firstname {
-        data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
+        data.firstname = if firstname.is_empty() {
+            None
+        } else {
+            Some(firstname)
+        };
     }
     if let Some(lastname) = update.lastname {
-        data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
+        data.lastname = if lastname.is_empty() {
+            None
+        } else {
+            Some(lastname)
+        };
     }
     if let Some(email) = update.email {
         data.email = if email.is_empty() { None } else { Some(email) };
@@ -345,10 +346,9 @@ pub fn update_user(
 )]
 /// Remove a user from the configuration file.
 pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let _tfa_lock = crate::config::tfa::write_lock()?;
     let (mut config, expected_digest) = pbs_config::user::config()?;
     if let Some(ref digest) = digest {
@@ -357,7 +357,9 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
     }
     match config.sections.get(userid.as_str()) {
-        Some(_) => { config.sections.remove(userid.as_str()); },
+        Some(_) => {
+            config.sections.remove(userid.as_str());
+        }
         None => bail!("user '{}' does not exist.", userid),
     }
@@ -365,7 +367,7 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
     let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
     match authenticator.remove_password(userid.name()) {
-        Ok(()) => {},
+        Ok(()) => {}
         Err(err) => {
             eprintln!(
                 "error removing password after deleting user {:?}: {}",
@@ -417,7 +419,6 @@ pub fn read_token(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<ApiToken, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
     let tokenid = Authid::from((userid, Some(token_name)));
@@ -483,7 +484,6 @@ pub fn generate_token(
     expire: Option<i64>,
     digest: Option<String>,
 ) -> Result<Value, Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -497,7 +497,11 @@ pub fn generate_token(
     let tokenid_string = tokenid.to_string();
     if config.sections.get(&tokenid_string).is_some() {
-        bail!("token '{}' for user '{}' already exists.", token_name.as_str(), userid);
+        bail!(
+            "token '{}' for user '{}' already exists.",
+            token_name.as_str(),
+            userid
+        );
     }
     let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
@@ -564,7 +568,6 @@ pub fn update_token(
     expire: Option<i64>,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -632,7 +635,6 @@ pub fn delete_token(
     token_name: Tokenname,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -646,8 +648,14 @@ pub fn delete_token(
     let tokenid_string = tokenid.to_string();
     match config.sections.get(&tokenid_string) {
-        Some(_) => { config.sections.remove(&tokenid_string); },
-        None => bail!("token '{}' of user '{}' does not exist.", token_name.as_str(), userid),
+        Some(_) => {
+            config.sections.remove(&tokenid_string);
+        }
+        None => bail!(
+            "token '{}' of user '{}' does not exist.",
+            token_name.as_str(),
+            userid
+        ),
     }
     token_shadow::delete_secret(&tokenid)?;
@@ -664,7 +672,7 @@ pub fn delete_token(
     }
 )]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// A Token Entry that contains the token-name
 pub struct TokenApiEntry {
     /// The Token name
@@ -699,20 +707,16 @@ pub fn list_tokens(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TokenApiEntry>, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
-    let list:Vec<ApiToken> = config.convert_to_typed_array("token")?;
+    let list: Vec<ApiToken> = config.convert_to_typed_array("token")?;
     rpcenv["digest"] = hex::encode(&digest).into();
     let filter_by_owner = |token: ApiToken| {
         if token.tokenid.is_token() && token.tokenid.user() == &userid {
             let token_name = token.tokenid.tokenname().unwrap().to_owned();
-            Some(TokenApiEntry {
-                token_name,
-                token,
-            })
+            Some(TokenApiEntry { token_name, token })
         } else {
             None
         }
@@ -733,9 +737,7 @@ const TOKEN_ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TOKENS)
     .match_all("token-name", &TOKEN_ITEM_ROUTER);
-const USER_SUBDIRS: SubdirMap = &[
-    ("token", &TOKEN_ROUTER),
-];
+const USER_SUBDIRS: SubdirMap = &[("token", &TOKEN_ROUTER)];
 const USER_ROUTER: Router = Router::new()
     .get(&API_METHOD_READ_USER)
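Two match-arm normalizations repeat through this file: a block-bodied arm loses its trailing comma, and an arm squeezed onto one line (`Some(_) => { ... },`) is expanded, as in the `delete_user` and `delete_token` hunks. A self-contained sketch (hypothetical config map standing in for the section config):

    use std::collections::HashMap;

    fn delete_entry(config: &mut HashMap<String, String>, id: &str) -> Result<(), String> {
        // before rustfmt:
        //   match config.get(id) {
        //       Some(_) => { config.remove(id); },
        //       None => return Err(format!("entry '{}' does not exist.", id)),
        //   }
        match config.get(id) {
            Some(_) => {
                config.remove(id);
            }
            None => return Err(format!("entry '{}' does not exist.", id)),
        }
        Ok(())
    }

    fn main() {
        let mut config = HashMap::from([("alice".to_string(), "cfg".to_string())]);
        assert!(delete_entry(&mut config, "alice").is_ok());
        assert!(delete_entry(&mut config, "alice").is_err());
    }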

File diff suppressed because it is too large.


@@ -1,18 +1,18 @@
 //! Backup Server Administration
-use proxmox_router::{Router, SubdirMap};
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
 pub mod datastore;
 pub mod sync;
-pub mod verify;
 pub mod traffic_control;
+pub mod verify;
 const SUBDIRS: SubdirMap = &[
     ("datastore", &datastore::ROUTER),
     ("sync", &sync::ROUTER),
     ("traffic-control", &traffic_control::ROUTER),
-    ("verify", &verify::ROUTER)
+    ("verify", &verify::ROUTER),
 ];
 pub const ROUTER: Router = Router::new()


@@ -3,32 +3,23 @@
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
-use proxmox_sys::sortable;
 use proxmox_router::{
-    list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
-    Permission,
+    list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+    SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
-use pbs_api_types::{DATASTORE_SCHEMA, JOB_ID_SCHEMA, Authid, SyncJobConfig, SyncJobStatus};
+use pbs_api_types::{Authid, SyncJobConfig, SyncJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA};
 use pbs_config::sync;
 use pbs_config::CachedUserInfo;
 use crate::{
     api2::{
+        config::sync::{check_sync_job_modify_access, check_sync_job_read_access},
         pull::do_sync_job,
-        config::sync::{
-            check_sync_job_modify_access,
-            check_sync_job_read_access,
-        },
-    },
-    server::{
-        jobstate::{
-            Job,
-            JobState,
-            compute_schedule_status,
-        },
     },
+    server::jobstate::{compute_schedule_status, Job, JobState},
 };
 #[api(
@@ -56,7 +47,6 @@ pub fn list_sync_jobs(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SyncJobStatus>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
@@ -72,9 +62,7 @@ pub fn list_sync_jobs(
                 true
             }
         })
-        .filter(|job: &SyncJobConfig| {
-            check_sync_job_read_access(&user_info, &auth_id, job)
-        });
+        .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job));
     let mut list = Vec::new();
@@ -84,7 +72,10 @@ pub fn list_sync_jobs(
         let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
-        list.push(SyncJobStatus { config: job, status });
+        list.push(SyncJobStatus {
+            config: job,
+            status,
+        });
     }
     rpcenv["digest"] = hex::encode(&digest).into();
@@ -131,19 +122,12 @@ pub fn run_sync_job(
 }
 #[sortable]
-const SYNC_INFO_SUBDIRS: SubdirMap = &[
-    (
-        "run",
-        &Router::new()
-            .post(&API_METHOD_RUN_SYNC_JOB)
-    ),
-];
+const SYNC_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_SYNC_JOB))];
 const SYNC_INFO_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
     .subdirs(SYNC_INFO_SUBDIRS);
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_SYNC_JOBS)
     .match_all("id", &SYNC_INFO_ROUTER);


@@ -1,12 +1,10 @@
 use anyhow::Error;
 use serde::{Deserialize, Serialize};
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
-use pbs_api_types::{
-    TrafficControlRule, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{TrafficControlRule, PRIV_SYS_AUDIT};
 use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
@@ -18,7 +16,7 @@ use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
     },
 )]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Traffic control rule config with current rates
 pub struct TrafficControlCurrentRate {
     #[serde(flatten)]
@@ -48,7 +46,6 @@ pub struct TrafficControlCurrentRate {
 pub fn show_current_traffic(
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TrafficControlCurrentRate>, Error> {
-
     let (config, digest) = pbs_config::traffic_control::config()?;
     let rules: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
@@ -62,7 +59,11 @@ pub fn show_current_traffic(
             None => (0, 0),
             Some(state) => (state.rate_in, state.rate_out),
         };
-        list.push(TrafficControlCurrentRate {config, cur_rate_in, cur_rate_out});
+        list.push(TrafficControlCurrentRate {
+            config,
+            cur_rate_in,
+            cur_rate_out,
+        });
     }
     // also return the configuration digest
@@ -71,5 +72,4 @@ pub fn show_current_traffic(
     Ok(list)
 }
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_SHOW_CURRENT_TRAFFIC);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_SHOW_CURRENT_TRAFFIC);


@@ -3,29 +3,23 @@
 use anyhow::{format_err, Error};
 use serde_json::Value;
-use proxmox_sys::sortable;
 use proxmox_router::{
-    list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
-    Permission,
+    list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+    SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 use pbs_api_types::{
-    VerificationJobConfig, VerificationJobStatus, JOB_ID_SCHEMA, Authid,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, DATASTORE_SCHEMA,
+    Authid, VerificationJobConfig, VerificationJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
 };
 use pbs_config::verify;
 use pbs_config::CachedUserInfo;
-use crate::{
-    server::{
-        do_verification_job,
-        jobstate::{
-            Job,
-            JobState,
-            compute_schedule_status,
-        },
-    },
+use crate::server::{
+    do_verification_job,
+    jobstate::{compute_schedule_status, Job, JobState},
 };
 #[api(
@@ -84,7 +78,10 @@ pub fn list_verification_jobs(
         let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
-        list.push(VerificationJobStatus { config: job, status });
+        list.push(VerificationJobStatus {
+            config: job,
+            status,
+        });
     }
     rpcenv["digest"] = hex::encode(&digest).into();
@@ -117,7 +114,12 @@ pub fn run_verification_job(
     let (config, _digest) = verify::config()?;
     let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
-    user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &verification_job.store],
+        PRIV_DATASTORE_VERIFY,
+        true,
+    )?;
     let job = Job::new("verificationjob", &id)?;
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
@@ -128,7 +130,8 @@ pub fn run_verification_job(
 }
 #[sortable]
-const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
+const VERIFICATION_INFO_SUBDIRS: SubdirMap =
+    &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
 const VERIFICATION_INFO_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
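The `check_privs` rewrite above shows the argument-list rule: a call that would overflow the line gets one argument per line, trailing comma included. A compilable sketch with a hypothetical stub standing in for the real ACL check:

    const PRIV_DATASTORE_VERIFY: u64 = 1 << 5;

    fn check_privs(auth_id: &str, path: &[&str], required: u64, partial: bool) -> Result<(), String> {
        // stub: the real implementation consults the ACL tree
        let _ = (auth_id, path, required, partial);
        Ok(())
    }

    fn main() -> Result<(), String> {
        // before (one line, > 100 columns):
        //   check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
        check_privs(
            "user@pbs",
            &["datastore", "store1"],
            PRIV_DATASTORE_VERIFY,
            true,
        )?;
        Ok(())
    }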


@@ -1,20 +1,20 @@
 use anyhow::{bail, format_err, Error};
-use std::sync::{Arc, Mutex};
-use std::collections::HashMap;
 use nix::dir::Dir;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
-use ::serde::{Serialize};
+use ::serde::Serialize;
 use serde_json::{json, Value};
-use proxmox_sys::fs::{replace_file, CreateOptions};
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
+use proxmox_sys::fs::{replace_file, CreateOptions};
-use pbs_datastore::{DataStore, DataBlob};
+use pbs_api_types::Authid;
 use pbs_datastore::backup_info::{BackupDir, BackupInfo};
 use pbs_datastore::dynamic_index::DynamicIndexWriter;
 use pbs_datastore::fixed_index::FixedIndexWriter;
-use pbs_api_types::Authid;
+use pbs_datastore::{DataBlob, DataStore};
-use proxmox_rest_server::{WorkerTask, formatter::*};
+use proxmox_rest_server::{formatter::*, WorkerTask};
 use crate::backup::verify_backup_dir_with_lock;
@@ -72,7 +72,7 @@ struct FixedWriterState {
 }
 // key=digest, value=length
-type KnownChunksMap = HashMap<[u8;32], u32>;
+type KnownChunksMap = HashMap<[u8; 32], u32>;
 struct SharedBackupState {
     finished: bool,
@@ -86,7 +86,6 @@ struct SharedBackupState {
 }
 impl SharedBackupState {
-
     // Raise error if finished flag is set
     fn ensure_unfinished(&self) -> Result<(), Error> {
         if self.finished {
@@ -102,7 +101,6 @@ impl SharedBackupState {
     }
 }
-
 /// `RpcEnvironmet` implementation for backup service
 #[derive(Clone)]
 pub struct BackupEnvironment {
@@ -115,7 +113,7 @@ pub struct BackupEnvironment {
     pub datastore: Arc<DataStore>,
     pub backup_dir: BackupDir,
     pub last_backup: Option<BackupInfo>,
-    state: Arc<Mutex<SharedBackupState>>
+    state: Arc<Mutex<SharedBackupState>>,
 }
 impl BackupEnvironment {
@@ -126,7 +124,6 @@ impl BackupEnvironment {
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
     ) -> Self {
-
         let state = SharedBackupState {
             finished: false,
             uid_counter: 0,
@@ -188,13 +185,21 @@ impl BackupEnvironment {
         };
         if size > data.chunk_size {
-            bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
+            bail!(
+                "fixed writer '{}' - got large chunk ({} > {}",
+                data.name,
+                size,
+                data.chunk_size
+            );
         }
         if size < data.chunk_size {
             data.small_chunk_count += 1;
             if data.small_chunk_count > 1 {
-                bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)", wid);
+                bail!(
+                    "fixed writer '{}' - detected multiple end chunks (chunk size too small)",
+                    wid
+                );
             }
         }
@@ -202,7 +207,9 @@ impl BackupEnvironment {
         data.upload_stat.count += 1;
         data.upload_stat.size += size as u64;
         data.upload_stat.compressed_size += compressed_size as u64;
-        if is_duplicate { data.upload_stat.duplicates += 1; }
+        if is_duplicate {
+            data.upload_stat.duplicates += 1;
+        }
         // register chunk
         state.known_chunks.insert(digest, size);
@@ -235,7 +242,9 @@ impl BackupEnvironment {
         data.upload_stat.count += 1;
         data.upload_stat.size += size as u64;
         data.upload_stat.compressed_size += compressed_size as u64;
-        if is_duplicate { data.upload_stat.duplicates += 1; }
+        if is_duplicate {
+            data.upload_stat.duplicates += 1;
+        }
         // register chunk
         state.known_chunks.insert(digest, size);
@@ -250,37 +259,71 @@ impl BackupEnvironment {
     }
     /// Store the writer with an unique ID
-    pub fn register_dynamic_writer(&self, index: DynamicIndexWriter, name: String) -> Result<usize, Error> {
+    pub fn register_dynamic_writer(
+        &self,
+        index: DynamicIndexWriter,
+        name: String,
+    ) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
         let uid = state.next_uid();
-        state.dynamic_writers.insert(uid, DynamicWriterState {
-            index, name, offset: 0, chunk_count: 0, upload_stat: UploadStatistic::new(),
-        });
+        state.dynamic_writers.insert(
+            uid,
+            DynamicWriterState {
+                index,
+                name,
+                offset: 0,
+                chunk_count: 0,
+                upload_stat: UploadStatistic::new(),
+            },
+        );
         Ok(uid)
     }
     /// Store the writer with an unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
+    pub fn register_fixed_writer(
+        &self,
+        index: FixedIndexWriter,
+        name: String,
+        size: usize,
+        chunk_size: u32,
+        incremental: bool,
+    ) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
         let uid = state.next_uid();
-        state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
-        });
+        state.fixed_writers.insert(
+            uid,
+            FixedWriterState {
+                index,
+                name,
+                chunk_count: 0,
+                size,
+                chunk_size,
+                small_chunk_count: 0,
+                upload_stat: UploadStatistic::new(),
+                incremental,
+            },
+        );
         Ok(uid)
     }
     /// Append chunk to dynamic writer
-    pub fn dynamic_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+    pub fn dynamic_writer_append_chunk(
+        &self,
+        wid: usize,
+        offset: u64,
+        size: u32,
+        digest: &[u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
@@ -290,10 +333,13 @@ impl BackupEnvironment {
             None => bail!("dynamic writer '{}' not registered", wid),
         };
         if data.offset != offset {
-            bail!("dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
-                  data.name, data.offset, offset);
+            bail!(
+                "dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
+                data.name,
+                data.offset,
+                offset
+            );
         }
         data.offset += size as u64;
@@ -305,7 +351,13 @@ impl BackupEnvironment {
     }
     /// Append chunk to fixed writer
-    pub fn fixed_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+    pub fn fixed_writer_append_chunk(
+        &self,
+        wid: usize,
+        offset: u64,
+        size: u32,
+        digest: &[u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
@@ -325,7 +377,15 @@ impl BackupEnvironment {
         Ok(())
     }
-    fn log_upload_stat(&self, archive_name: &str, csum: &[u8; 32], uuid: &[u8; 16], size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
+    fn log_upload_stat(
+        &self,
+        archive_name: &str,
+        csum: &[u8; 32],
+        uuid: &[u8; 16],
+        size: u64,
+        chunk_count: u64,
+        upload_stat: &UploadStatistic,
+    ) {
         self.log(format!("Upload statistics for '{}'", archive_name));
         self.log(format!("UUID: {}", hex::encode(uuid)));
         self.log(format!("Checksum: {}", hex::encode(csum)));
@@ -336,7 +396,11 @@ impl BackupEnvironment {
             return;
         }
-        self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
+        self.log(format!(
+            "Upload size: {} ({}%)",
+            upload_stat.size,
+            (upload_stat.size * 100) / size
+        ));
         // account for zero chunk, which might be uploaded but never used
         let client_side_duplicates = if chunk_count < upload_stat.count {
@@ -348,17 +412,29 @@ impl BackupEnvironment {
         let server_side_duplicates = upload_stat.duplicates;
         if (client_side_duplicates + server_side_duplicates) > 0 {
-            let per = (client_side_duplicates + server_side_duplicates)*100/chunk_count;
-            self.log(format!("Duplicates: {}+{} ({}%)", client_side_duplicates, server_side_duplicates, per));
+            let per = (client_side_duplicates + server_side_duplicates) * 100 / chunk_count;
+            self.log(format!(
+                "Duplicates: {}+{} ({}%)",
+                client_side_duplicates, server_side_duplicates, per
+            ));
         }
         if upload_stat.size > 0 {
-            self.log(format!("Compression: {}%", (upload_stat.compressed_size*100)/upload_stat.size));
+            self.log(format!(
+                "Compression: {}%",
+                (upload_stat.compressed_size * 100) / upload_stat.size
+            ));
         }
     }
     /// Close dynamic writer
-    pub fn dynamic_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn dynamic_writer_close(
+        &self,
+        wid: usize,
+        chunk_count: u64,
+        size: u64,
+        csum: [u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
@@ -369,11 +445,21 @@ impl BackupEnvironment {
         };
         if data.chunk_count != chunk_count {
-            bail!("dynamic writer '{}' close failed - unexpected chunk count ({} != {})", data.name, data.chunk_count, chunk_count);
+            bail!(
+                "dynamic writer '{}' close failed - unexpected chunk count ({} != {})",
+                data.name,
+                data.chunk_count,
+                chunk_count
+            );
         }
         if data.offset != size {
-            bail!("dynamic writer '{}' close failed - unexpected file size ({} != {})", data.name, data.offset, size);
+            bail!(
+                "dynamic writer '{}' close failed - unexpected file size ({} != {})",
+                data.name,
+                data.offset,
+                size
+            );
         }
         let uuid = data.index.uuid;
@@ -381,10 +467,20 @@ impl BackupEnvironment {
         let expected_csum = data.index.close()?;
         if csum != expected_csum {
-            bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
+            bail!(
+                "dynamic writer '{}' close failed - got unexpected checksum",
+                data.name
+            );
         }
-        self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
+        self.log_upload_stat(
+            &data.name,
+            &csum,
+            &uuid,
+            size,
+            chunk_count,
+            &data.upload_stat,
+        );
         state.file_counter += 1;
         state.backup_size += size;
@@ -394,7 +490,13 @@ impl BackupEnvironment {
     }
     /// Close fixed writer
-    pub fn fixed_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn fixed_writer_close(
+        &self,
+        wid: usize,
+        chunk_count: u64,
+        size: u64,
+        csum: [u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
         state.ensure_unfinished()?;
@@ -405,18 +507,33 @@ impl BackupEnvironment {
         };
         if data.chunk_count != chunk_count {
-            bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
+            bail!(
+                "fixed writer '{}' close failed - received wrong number of chunk ({} != {})",
+                data.name,
+                data.chunk_count,
+                chunk_count
+            );
         }
         if !data.incremental {
             let expected_count = data.index.index_length();
             if chunk_count != (expected_count as u64) {
-                bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+                bail!(
+                    "fixed writer '{}' close failed - unexpected chunk count ({} != {})",
+                    data.name,
+                    expected_count,
+                    chunk_count
+                );
            }
            if size != (data.size as u64) {
-                bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+                bail!(
+                    "fixed writer '{}' close failed - unexpected file size ({} != {})",
+                    data.name,
+                    data.size,
+                    size
+                );
            }
        }
@@ -424,10 +541,20 @@ impl BackupEnvironment {
         let expected_csum = data.index.close()?;
         if csum != expected_csum {
-            bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
+            bail!(
+                "fixed writer '{}' close failed - got unexpected checksum",
+                data.name
+            );
         }
-        self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
+        self.log_upload_stat(
+            &data.name,
+            &expected_csum,
+            &uuid,
+            size,
+            chunk_count,
+            &data.upload_stat,
+        );
         state.file_counter += 1;
         state.backup_size += size;
@@ -437,7 +564,6 @@ impl BackupEnvironment {
     }
     pub fn add_blob(&self, file_name: &str, data: Vec<u8>) -> Result<(), Error> {
-
         let mut path = self.datastore.base_path();
         path.push(self.backup_dir.relative_path());
         path.push(file_name);
@@ -451,7 +577,10 @@ impl BackupEnvironment {
         let raw_data = blob.raw_data();
         replace_file(&path, raw_data, CreateOptions::new(), false)?;
-        self.log(format!("add blob {:?} ({} bytes, comp: {})", path, orig_len, blob_len));
+        self.log(format!(
+            "add blob {:?} ({} bytes, comp: {})",
+            path, orig_len, blob_len
+        ));
         let mut state = self.state.lock().unwrap();
         state.file_counter += 1;
@@ -478,9 +607,11 @@ impl BackupEnvironment {
         // check for valid manifest and store stats
         let stats = serde_json::to_value(state.backup_stat)?;
-        self.datastore.update_manifest(&self.backup_dir, |manifest| {
-            manifest.unprotected["chunk_upload_stats"] = stats;
-        }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+        self.datastore
+            .update_manifest(&self.backup_dir, |manifest| {
+                manifest.unprotected["chunk_upload_stats"] = stats;
+            })
+            .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
         if let Some(base) = &self.last_backup {
             let path = self.datastore.snapshot_path(&base.backup_dir);
@@ -509,11 +640,13 @@ impl BackupEnvironment {
             return Ok(());
         }
-        let worker_id = format!("{}:{}/{}/{:08X}",
+        let worker_id = format!(
+            "{}:{}/{}/{:08X}",
             self.datastore.name(),
             self.backup_dir.group().backup_type(),
             self.backup_dir.group().backup_id(),
-            self.backup_dir.backup_time());
+            self.backup_dir.backup_time()
+        );
         let datastore = self.datastore.clone();
         let backup_dir = self.backup_dir.clone();
@@ -526,7 +659,6 @@ impl BackupEnvironment {
             move |worker| {
                 worker.log_message("Automatically verifying newly added snapshot");
                 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
-
                 if !verify_backup_dir_with_lock(
                     &verify_worker,
@@ -540,7 +672,8 @@ impl BackupEnvironment {
                 Ok(())
             },
-        ).map(|_| ())
+        )
+        .map(|_| ())
     }
     pub fn log<S: AsRef<str>>(&self, msg: S) {
@@ -548,7 +681,9 @@ impl BackupEnvironment {
     }
     pub fn debug<S: AsRef<str>>(&self, msg: S) {
-        if self.debug { self.worker.log_message(msg); }
+        if self.debug {
+            self.worker.log_message(msg);
+        }
     }
     pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {
@@ -582,7 +717,6 @@ impl BackupEnvironment {
 }
 impl RpcEnvironment for BackupEnvironment {
-
     fn result_attrib_mut(&mut self) -> &mut Value {
         &mut self.result_attributes
     }
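The many `bail!` and `format!` rewrites in this file follow one rule: rustfmt lays out function-like macro invocations with the same logic as function calls, and when the trailing arguments fit together after the format string it keeps them on one shared line. A std-only sketch (`format!` shown; anyhow's `bail!` wraps the same way):

    fn check_chunk_size(name: &str, size: u32, chunk_size: u32) -> Result<(), String> {
        if size > chunk_size {
            // before (one line, too wide):
            //   return Err(format!("fixed writer '{}' - got large chunk ({} > {})", name, size, chunk_size));
            return Err(format!(
                "fixed writer '{}' - got large chunk ({} > {})",
                name, size, chunk_size
            ));
        }
        Ok(())
    }

    fn main() {
        assert!(check_chunk_size("disk.img", 8192, 4096).is_err());
        assert!(check_chunk_size("disk.img", 4096, 4096).is_ok());
    }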


@ -2,32 +2,32 @@
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hex::FromHex;
use hyper::header::{HeaderValue, UPGRADE}; use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts; use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode}; use hyper::{Body, Request, Response, StatusCode};
use serde_json::{json, Value}; use serde_json::{json, Value};
use hex::FromHex;
use proxmox_sys::sortable;
use proxmox_router::list_subdirs_api_method; use proxmox_router::list_subdirs_api_method;
use proxmox_router::{ use proxmox_router::{
ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
}; };
use proxmox_schema::*; use proxmox_schema::*;
use proxmox_sys::sortable;
use pbs_api_types::{ use pbs_api_types::{
Authid, Operation, VerifyState, SnapshotVerifyState, Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
}; };
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile; use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType}; use pbs_datastore::manifest::{archive_type, ArchiveType};
use proxmox_rest_server::{WorkerTask, H2Service}; use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;
mod environment; mod environment;
use environment::*; use environment::*;
@ -35,8 +35,7 @@ use environment::*;
mod upload_chunk; mod upload_chunk;
use upload_chunk::*; use upload_chunk::*;
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
.upgrade(&API_METHOD_UPGRADE_BACKUP);
#[sortable] #[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new( pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
@@ -65,269 +64,296 @@ fn upgrade_to_backup_protocol(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);
        let benchmark = param["benchmark"].as_bool().unwrap_or(false);

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let store = required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(
            &auth_id,
            &["datastore", &store],
            PRIV_DATASTORE_BACKUP,
            false,
        )?;

        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let protocols = parts
            .headers
            .get("UPGRADE")
            .ok_or_else(|| format_err!("missing Upgrade header"))?
            .to_str()?;

        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
            bail!("invalid protocol name");
        }

        if parts.version >= http::version::Version::HTTP_2 {
            bail!(
                "unexpected http version '{:?}' (expected version < 2)",
                parts.version
            );
        }

        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

        let env_type = rpcenv.env_type();

        let backup_group = BackupGroup::new(backup_type, backup_id);

        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
            if !benchmark {
                bail!("unable to run benchmark without --benchmark flags");
            }
            "benchmark"
        } else {
            if benchmark {
                bail!("benchmark flags is only allowed on 'host/benchmark'");
            }
            "backup"
        };

        // lock backup group to only allow one backup per group at a time
        let (owner, _group_guard) =
            datastore.create_locked_backup_group(&backup_group, &auth_id)?;

        // permission check
        let correct_owner =
            owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner && worker_type != "benchmark" {
            // only the owner is allowed to create additional snapshots
            bail!("backup owner check failed ({} != {})", auth_id, owner);
        }

        let last_backup = {
            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true)
                .unwrap_or(None);
            if let Some(info) = info {
                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
                let verify = manifest.unprotected["verify_state"].clone();
                match serde_json::from_value::<SnapshotVerifyState>(verify) {
                    Ok(verify) => match verify.state {
                        VerifyState::Ok => Some(info),
                        VerifyState::Failed => None,
                    },
                    Err(_) => {
                        // no verify state found, treat as valid
                        Some(info)
                    }
                }
            } else {
                None
            }
        };

        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

        let _last_guard = if let Some(last) = &last_backup {
            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                bail!("backup timestamp is older than last backup.");
            }

            // lock last snapshot to prevent forgetting/pruning it during backup
            let full_path = datastore.snapshot_path(&last.backup_dir);
            Some(lock_dir_noblock_shared(
                &full_path,
                "snapshot",
                "base snapshot is already locked by another operation",
            )?)
        } else {
            None
        };

        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
        if !is_new {
            bail!("backup directory already exists.");
        }

        WorkerTask::spawn(
            worker_type,
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| {
                let mut env = BackupEnvironment::new(
                    env_type,
                    auth_id,
                    worker.clone(),
                    datastore,
                    backup_dir,
                );

                env.debug = debug;
                env.last_backup = last_backup;

                env.log(format!(
                    "starting new {} on datastore '{}': {:?}",
                    worker_type, store, path
                ));

                let service =
                    H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

                let abort_future = worker.abort_future();

                let env2 = env.clone();

                let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
                    .map_err(Error::from)
                    .and_then(move |conn| {
                        env2.debug("protocol upgrade done");

                        let mut http = hyper::server::conn::Http::new();
                        http.http2_only(true);
                        // increase window size: todo - find optimal size
                        let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
                        http.http2_initial_stream_window_size(window_size);
                        http.http2_initial_connection_window_size(window_size);
                        http.http2_max_frame_size(4 * 1024 * 1024);

                        let env3 = env2.clone();
                        http.serve_connection(conn, service).map(move |result| {
                            match result {
                                Err(err) => {
                                    // Avoid Transport endpoint is not connected (os error 107)
                                    // fixme: find a better way to test for that error
                                    if err.to_string().starts_with("connection error")
                                        && env3.finished()
                                    {
                                        Ok(())
                                    } else {
                                        Err(Error::from(err))
                                    }
                                }
                                Ok(()) => Ok(()),
                            }
                        })
                    });
                let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));

                async move {
                    // keep flock until task ends
                    let _group_guard = _group_guard;
                    let snap_guard = snap_guard;
                    let _last_guard = _last_guard;

                    let res = select! {
                        req = req_fut => req,
                        abrt = abort_future => abrt,
                    };
                    if benchmark {
                        env.log("benchmark finished successfully");
                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                        return Ok(());
                    }

                    let verify = |env: BackupEnvironment| {
                        if let Err(err) = env.verify_after_complete(snap_guard) {
                            env.log(format!(
                                "backup finished, but starting the requested verify task failed: {}",
                                err
                            ));
                        }
                    };

                    match (res, env.ensure_finished()) {
                        (Ok(_), Ok(())) => {
                            env.log("backup finished successfully");
                            verify(env);
                            Ok(())
                        }
                        (Err(err), Ok(())) => {
                            // ignore errors after finish
                            env.log(format!("backup had errors but finished: {}", err));
                            verify(env);
                            Ok(())
                        }
                        (Ok(_), Err(err)) => {
                            env.log(format!("backup ended and finish failed: {}", err));
                            env.log("removing unfinished backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                        (Err(err), Err(_)) => {
                            env.log(format!("backup failed: {}", err));
                            env.log("removing failed backup");
                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
                            Err(err)
                        }
                    }
                }
            },
        )?;

        let response = Response::builder()
            .status(StatusCode::SWITCHING_PROTOCOLS)
            .header(
                UPGRADE,
                HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
            )
            .body(Body::empty())?;

        Ok(response)
    }
    .boxed()
}
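The handler races the upgraded connection against the worker's abort future, so an aborted task tears down the connection instead of hanging. A minimal, self-contained sketch of that select! pattern, using only the futures crate (the names here are illustrative stand-ins, not the actual PBS types):

    use futures::{future::FutureExt, pin_mut, select};

    fn main() {
        futures::executor::block_on(async {
            // stand-ins for req_fut and abort_future above
            let req_fut = async { Ok::<_, String>("request done") }.fuse();
            let abort_future = futures::future::pending::<()>()
                .map(|_| Err::<&str, _>("task aborted".to_string()))
                .fuse();
            pin_mut!(req_fut, abort_future);

            // whichever future resolves first decides the task's result
            let res = select! {
                req = req_fut => req,
                abrt = abort_future => abrt,
            };
            println!("{:?}", res);
        });
    }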
const BACKUP_API_SUBDIRS: SubdirMap = &[
    ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
    (
        "dynamic_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
    ),
    (
        "dynamic_close",
        &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
    ),
    (
        "dynamic_index",
        &Router::new()
            .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
            .put(&API_METHOD_DYNAMIC_APPEND),
    ),
    (
        "finish",
        &Router::new().post(&ApiMethod::new(
            &ApiHandler::Sync(&finish_backup),
            &ObjectSchema::new("Mark backup as finished.", &[]),
        )),
    ),
    (
        "fixed_chunk",
        &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
    ),
    (
        "fixed_close",
        &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
    ),
    (
        "fixed_index",
        &Router::new()
            .post(&API_METHOD_CREATE_FIXED_INDEX)
            .put(&API_METHOD_FIXED_APPEND),
    ),
    (
        "previous",
        &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
    ),
    (
        "previous_backup_time",
        &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
    ),
    (
        "speedtest",
        &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
    ),
];
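The entries in maps like this are kept in ascending key order (elsewhere enforced with the sorted! macro), which allows resolving a path component by binary search rather than a linear scan; that is an assumption about why the ordering matters, not a claim about proxmox-router internals. A plain-Rust sketch of such a lookup over an illustrative table:

    // Illustrative stand-in for a sorted subdir table; values are just labels here.
    const SUBDIRS: &[(&str, &str)] = &[
        ("blob", "upload blob"),
        ("dynamic_chunk", "upload dynamic chunk"),
        ("speedtest", "upload speedtest"),
    ];

    fn lookup(name: &str) -> Option<&'static str> {
        SUBDIRS
            .binary_search_by_key(&name, |&(k, _)| k) // requires sorted keys
            .ok()
            .map(|i| SUBDIRS[i].1)
    }

    fn main() {
        assert_eq!(lookup("speedtest"), Some("upload speedtest"));
        assert_eq!(lookup("missing"), None);
    }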
@@ -340,10 +366,8 @@ pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&create_dynamic_index),
    &ObjectSchema::new(
        "Create dynamic chunk index file.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
    ),
);

fn create_dynamic_index(
@@ -351,7 +375,6 @@ fn create_dynamic_index(
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
@@ -379,14 +402,22 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
        "Create fixed chunk index file.",
        &sorted!([
            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("File size.").minimum(1).schema()
            ),
            (
                "reuse-csum",
                true,
                &StringSchema::new(
                    "If set, compare last backup's \
                    csum and reuse index for incremental backup if it matches."
                )
                .schema()
            ),
        ]),
    ),
);

fn create_fixed_index(
@@ -394,7 +425,6 @@ fn create_fixed_index(
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let name = required_string_param(&param, "archive-name")?.to_owned();
@@ -409,7 +439,7 @@ fn create_fixed_index(
    let mut path = env.backup_dir.relative_path();
    path.push(&archive_name);

    let chunk_size = 4096 * 1024; // todo: ??

    // do incremental backup if csum is set
    let mut reader = None;
@@ -436,8 +466,11 @@ fn create_fixed_index(
        let (old_csum, _) = index.compute_csum();
        let old_csum = hex::encode(&old_csum);
        if old_csum != csum {
            bail!(
                "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
                csum,
                old_csum
            );
        }

        reader = Some(index);
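Reuse of the previous fixed index hinges on a plain equality check: the client-supplied reuse-csum must match the hex encoding of the csum computed from the last backup's index. A small sketch of that decision, assuming the same hex crate that the handler imports:

    fn may_reuse(last_csum: &[u8; 32], client_csum_hex: &str) -> bool {
        // the stored csum is raw bytes; the client sends it hex-encoded
        hex::encode(last_csum) == client_csum_hex
    }

    fn main() {
        let last = [0u8; 32];
        assert!(may_reuse(&last, &"00".repeat(32)));
        assert!(!may_reuse(&last, "deadbeef"));
    }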
@@ -483,24 +516,28 @@ pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            ),
        ]),
    ),
);

fn dynamic_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

@@ -511,11 +548,16 @@ fn dynamic_append(
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to dynamic index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
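For illustration, a hypothetical append payload that satisfies the length check above, built with serde_json; the digest is a placeholder, not real chunk data:

    use serde_json::json;

    fn main() {
        let digest = "00".repeat(32); // placeholder for a 32-byte chunk digest in hex
        let param = json!({
            "wid": 1,
            "digest-list": [digest],
            "offset-list": [0], // must have the same length as digest-list
        });
        println!("{}", param);
    }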
@@ -548,24 +590,28 @@ pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
                    &IntegerSchema::new("Corresponding chunk offsets.")
                        .minimum(0)
                        .schema()
                )
                .schema()
            )
        ]),
    ),
);

fn fixed_append(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;

    if offset_list.len() != digest_list.len() {
        bail!(
            "offset list has wrong length ({} != {})",
            offset_list.len(),
            digest_list.len()
        );
    }

    let env: &BackupEnvironment = rpcenv.as_ref();

@@ -576,11 +622,16 @@ fn fixed_append(
        let digest_str = item.as_str().unwrap();
        let digest = <[u8; 32]>::from_hex(digest_str)?;
        let offset = offset_list[i].as_u64().unwrap();
        let size = env
            .lookup_chunk(&digest)
            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;

        env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

        env.debug(format!(
            "successfully added chunk {} to fixed index {} (offset {}, size {})",
            digest_str, wid, offset, size
        ));
    }

    Ok(Value::Null)
@@ -603,28 +654,35 @@ pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
            (
                "chunk-count",
                false,
                &IntegerSchema::new(
                    "Chunk count. This is used to verify that the server got all chunks."
                )
                .minimum(1)
                .schema()
            ),
            (
                "size",
                false,
                &IntegerSchema::new(
                    "File size. This is used to verify that the server got all data."
                )
                .minimum(1)
                .schema()
            ),
            (
                "csum",
                false,
                &StringSchema::new("Digest list checksum.").schema()
            ),
        ]),
    ),
);

fn close_dynamic_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
@@ -673,12 +731,11 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
    )
);

fn close_fixed_index(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
@@ -694,12 +751,11 @@ fn close_fixed_index (
    Ok(Value::Null)
}

fn finish_backup(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    env.finish_backup()?;
@@ -711,10 +767,7 @@ fn finish_backup (
#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&get_previous_backup_time),
    &ObjectSchema::new("Get previous backup time.", &[]),
);

fn get_previous_backup_time(
@@ -722,10 +775,12 @@ fn get_previous_backup_time(
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let env: &BackupEnvironment = rpcenv.as_ref();

    let backup_time = env
        .last_backup
        .as_ref()
        .map(|info| info.backup_dir.backup_time());

    Ok(json!(backup_time))
}
@@ -735,10 +790,8 @@ pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_previous),
    &ObjectSchema::new(
        "Download archive from previous backup.",
        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
    ),
);

fn download_previous(
@@ -748,7 +801,6 @@ fn download_previous(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let env: &BackupEnvironment = rpcenv.as_ref();

@@ -772,10 +824,13 @@ fn download_previous(
                let index = env.datastore.open_dynamic_reader(&path)?;
                Some(Box::new(index))
            }
            _ => None,
        };

        if let Some(index) = index {
            env.log(format!(
                "register chunks in '{}' from previous backup.",
                archive_name
            ));

            for pos in 0..index.index_count() {
                let info = index.chunk_info(pos).unwrap();
@@ -787,5 +842,6 @@ fn download_previous(
        env.log(format!("download '{}' from previous backup.", archive_name));
        crate::api2::helpers::create_download_response(path).await
    }
    .boxed()
}
@@ -4,19 +4,19 @@ use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
use hyper::http::request::Parts;
use hyper::Body;
use serde_json::{json, Value};

use proxmox_router::{ApiHandler, ApiMethod, ApiResponseFuture, RpcEnvironment};
use proxmox_schema::*;
use proxmox_sys::sortable;

use pbs_api_types::{BACKUP_ARCHIVE_NAME_SCHEMA, CHUNK_DIGEST_SCHEMA};
use pbs_datastore::file_formats::{DataBlobHeader, EncryptedDataBlobHeader};
use pbs_datastore::{DataBlob, DataStore};
use pbs_tools::json::{required_integer_param, required_string_param};

use super::environment::*;

@@ -30,8 +30,21 @@ pub struct UploadChunk {
}

impl UploadChunk {
    pub fn new(
        stream: Body,
        store: Arc<DataStore>,
        digest: [u8; 32],
        size: u32,
        encoded_size: u32,
    ) -> Self {
        Self {
            stream,
            store,
            size,
            encoded_size,
            raw_data: Some(vec![]),
            digest,
        }
    }
}

@@ -77,7 +90,12 @@ impl Future for UploadChunk {
                Err(err) => break err,
            };

            return Poll::Ready(Ok((
                this.digest,
                this.size,
                compressed_size as u32,
                is_duplicate,
            )));
        } else {
            break format_err!("poll upload chunk stream failed - already finished.");
        }
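The raw_data: Some(vec![]) initialisation and the "already finished" error form a take-once pattern: the buffer lives in an Option so the final poll can move it out exactly once, and any later poll fails cleanly. A stripped-down sketch of that pattern (a hypothetical type, not the actual UploadChunk):

    struct Accum {
        raw_data: Option<Vec<u8>>,
    }

    impl Accum {
        fn new() -> Self {
            Accum { raw_data: Some(Vec::new()) }
        }

        // append while the buffer is still present
        fn push(&mut self, chunk: &[u8]) -> Result<(), &'static str> {
            match self.raw_data.as_mut() {
                Some(buf) => {
                    buf.extend_from_slice(chunk);
                    Ok(())
                }
                None => Err("already finished"),
            }
        }

        // move the buffer out; a second call hits the None arm
        fn finish(&mut self) -> Result<Vec<u8>, &'static str> {
            self.raw_data.take().ok_or("already finished")
        }
    }

    fn main() {
        let mut acc = Accum::new();
        acc.push(b"abc").unwrap();
        assert_eq!(acc.finish().unwrap(), b"abc");
        assert!(acc.finish().is_err());
    }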
@@ -94,24 +112,36 @@ pub const API_METHOD_UPLOAD_FIXED_CHUNK: ApiMethod = ApiMethod::new(
    &ObjectSchema::new(
        "Upload a new chunk.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Fixed writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            ("digest", false, &CHUNK_DIGEST_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("Chunk size.")
                    .minimum(1)
                    .maximum(1024 * 1024 * 16)
                    .schema()
            ),
            (
                "encoded-size",
                false,
                &IntegerSchema::new("Encoded chunk size.")
                    .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
                    .maximum(
                        1024 * 1024 * 16
                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
                    )
                    .schema()
            ),
        ]),
    ),
);
fn upload_fixed_chunk(
@@ -121,7 +151,6 @@ fn upload_fixed_chunk(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let wid = required_integer_param(&param, "wid")? as usize;
        let size = required_integer_param(&param, "size")? as u32;
@@ -152,24 +181,36 @@ pub const API_METHOD_UPLOAD_DYNAMIC_CHUNK: ApiMethod = ApiMethod::new(
    &ObjectSchema::new(
        "Upload a new chunk.",
        &sorted!([
            (
                "wid",
                false,
                &IntegerSchema::new("Dynamic writer ID.")
                    .minimum(1)
                    .maximum(256)
                    .schema()
            ),
            ("digest", false, &CHUNK_DIGEST_SCHEMA),
            (
                "size",
                false,
                &IntegerSchema::new("Chunk size.")
                    .minimum(1)
                    .maximum(1024 * 1024 * 16)
                    .schema()
            ),
            (
                "encoded-size",
                false,
                &IntegerSchema::new("Encoded chunk size.")
                    .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
                    .maximum(
                        1024 * 1024 * 16
                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
                    )
                    .schema()
            ),
        ]),
    ),
);
fn upload_dynamic_chunk(
@@ -179,7 +220,6 @@ fn upload_dynamic_chunk(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let wid = required_integer_param(&param, "wid")? as usize;
        let size = required_integer_param(&param, "size")? as u32;
@@ -191,8 +231,7 @@ fn upload_dynamic_chunk(
        let env: &BackupEnvironment = rpcenv.as_ref();

        let (digest, size, compressed_size, is_duplicate) =
            UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;

        env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
        let digest_str = hex::encode(&digest);
@@ -200,12 +239,13 @@ fn upload_dynamic_chunk(
        let result = Ok(json!(digest_str));

        Ok(env.format_response(result))
    }
    .boxed()
}
pub const API_METHOD_UPLOAD_SPEEDTEST: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_speedtest),
    &ObjectSchema::new("Test upload speed.", &[]),
);

fn upload_speedtest(
@@ -215,9 +255,7 @@ fn upload_speedtest(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let result = req_body
            .map_err(Error::from)
            .try_fold(0, |size: usize, chunk| {
@@ -237,7 +275,8 @@ fn upload_speedtest(
        }
        let env: &BackupEnvironment = rpcenv.as_ref();
        Ok(env.format_response(Ok(Value::Null)))
    }
    .boxed()
}
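The speedtest handler only needs the total number of bytes received, which the stream's try_fold accumulates chunk by chunk. A self-contained sketch of the same fold over an in-memory stream (futures crate only; the real handler folds over the request body):

    use futures::stream::{self, TryStreamExt};

    fn main() {
        let body = stream::iter(vec![
            Ok::<_, std::io::Error>(vec![0u8; 4]),
            Ok(vec![0u8; 8]),
        ]);

        let total = futures::executor::block_on(
            // add each chunk's length to the running size, short-circuiting on error
            body.try_fold(0usize, |size, chunk| async move { Ok(size + chunk.len()) }),
        )
        .unwrap();

        assert_eq!(total, 12);
    }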
#[sortable]
@@ -247,13 +286,19 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
        "Upload binary blob file.",
        &sorted!([
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
            (
                "encoded-size",
                false,
                &IntegerSchema::new("Encoded blob size.")
                    .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
                    .maximum(
                        1024 * 1024 * 16
                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
                    )
                    .schema()
            )
        ]),
    ),
);

fn upload_blob(
@@ -263,7 +308,6 @@ fn upload_blob(
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let encoded_size = required_integer_param(&param, "encoded-size")? as usize;
@@ -283,11 +327,16 @@ fn upload_blob(
            .await?;

        if encoded_size != data.len() {
            bail!(
                "got blob with unexpected length ({} != {})",
                encoded_size,
                data.len()
            );
        }

        env.add_blob(&file_name, data)?;

        Ok(env.format_response(Ok(Value::Null)))
    }
    .boxed()
}
@@ -1,15 +1,12 @@
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};
use proxmox_sys::sortable;

pub mod openid;
pub mod tfa;

#[sortable]
const SUBDIRS: SubdirMap = &sorted!([("openid", &openid::ROUTER), ("tfa", &tfa::ROUTER),]);

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
@@ -1,16 +1,15 @@
use ::serde::{Deserialize, Serialize};
/// Configure OpenId realms
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    OpenIdRealmConfig, OpenIdRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
    PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA,
};

use pbs_config::domains;

@@ -33,7 +32,6 @@ pub fn list_openid_realms(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<OpenIdRealmConfig>, Error> {
    let (config, digest) = domains::config()?;

    let list = config.convert_to_typed_array("openid")?;

@@ -59,14 +57,13 @@ pub fn list_openid_realms(
)]
/// Create a new OpenId realm
pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
    let _lock = domains::lock_config()?;

    let (mut domains, _digest) = domains::config()?;

    if config.realm == "pbs"
        || config.realm == "pam"
        || domains.sections.get(&config.realm).is_some()
    {
        param_bail!("realm", "realm '{}' already exists.", config.realm);
    }

@@ -101,7 +98,6 @@ pub fn delete_openid_realm(
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let _lock = domains::lock_config()?;

    let (mut domains, expected_digest) = domains::config()?;

@@ -111,7 +107,7 @@ pub fn delete_openid_realm(
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if domains.sections.remove(&realm).is_none() {
        http_bail!(NOT_FOUND, "realm '{}' does not exist.", realm);
    }

@@ -138,7 +134,6 @@ pub fn read_openid_realm(
    realm: String,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<OpenIdRealmConfig, Error> {
    let (domains, digest) = domains::config()?;

    let config = domains.lookup("openid", &realm)?;

@@ -150,7 +145,7 @@ pub fn read_openid_realm(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -206,7 +201,6 @@ pub fn update_openid_realm(
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let _lock = domains::lock_config()?;

    let (mut domains, expected_digest) = domains::config()?;

@@ -221,12 +215,24 @@ pub fn update_openid_realm(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::client_key => {
                    config.client_key = None;
                }
                DeletableProperty::comment => {
                    config.comment = None;
                }
                DeletableProperty::autocreate => {
                    config.autocreate = None;
                }
                DeletableProperty::scopes => {
                    config.scopes = None;
                }
                DeletableProperty::prompt => {
                    config.prompt = None;
                }
                DeletableProperty::acr_values => {
                    config.acr_values = None;
                }
            }
        }
    }

@@ -240,14 +246,28 @@ pub fn update_openid_realm(
        }
    }

    if let Some(issuer_url) = update.issuer_url {
        config.issuer_url = issuer_url;
    }
    if let Some(client_id) = update.client_id {
        config.client_id = client_id;
    }

    if update.client_key.is_some() {
        config.client_key = update.client_key;
    }
    if update.autocreate.is_some() {
        config.autocreate = update.autocreate;
    }
    if update.scopes.is_some() {
        config.scopes = update.scopes;
    }
    if update.prompt.is_some() {
        config.prompt = update.prompt;
    }
    if update.acr_values.is_some() {
        config.acr_values = update.acr_values;
    }

    domains.set_data(&realm, "openid", &config)?;
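The update handler follows the option-merge convention used throughout these config APIs: every field of the *Updater type is an Option, and only fields that are actually set overwrite the stored section. A reduced sketch of that convention (hypothetical struct names, not the generated API types):

    #[derive(Debug, Default)]
    struct RealmConfig {
        comment: Option<String>,
        autocreate: Option<bool>,
    }

    #[derive(Debug, Default)]
    struct RealmConfigUpdater {
        comment: Option<String>,
        autocreate: Option<bool>,
    }

    fn apply(config: &mut RealmConfig, update: RealmConfigUpdater) {
        // unset fields leave the stored value untouched
        if update.comment.is_some() {
            config.comment = update.comment;
        }
        if update.autocreate.is_some() {
            config.autocreate = update.autocreate;
        }
    }

    fn main() {
        let mut config = RealmConfig { comment: Some("old".into()), autocreate: None };
        apply(&mut config, RealmConfigUpdater { comment: None, autocreate: Some(true) });
        assert_eq!(config.comment.as_deref(), Some("old"));
        assert_eq!(config.autocreate, Some(true));
    }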
@@ -5,10 +5,10 @@ use std::sync::{Arc, Mutex};
use std::time::SystemTime;

use anyhow::{bail, format_err, Error};
use hex::FromHex;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

use proxmox_router::{
    http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
@@ -1,18 +1,17 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, LtoTapeDrive, ScsiTapeChanger, ScsiTapeChangerUpdater, CHANGER_NAME_SCHEMA,
    PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, SLOT_ARRAY_SCHEMA,
};
use pbs_config::CachedUserInfo;

use pbs_tape::linux_list_drives::{check_drive_path, linux_tape_changer_list};

#[api(
    protected: true,
@@ -30,7 +29,6 @@ use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
)]
/// Create a new changer device
pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut section_config, _digest) = pbs_config::drive::config()?;

@@ -47,7 +45,12 @@ pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
        }

        if changer.path == config.path {
            param_bail!(
                "path",
                "Path '{}' already in use by '{}'",
                config.path,
                changer.name
            );
        }
    }

@@ -79,7 +82,6 @@ pub fn get_config(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ScsiTapeChanger, Error> {
    let (config, digest) = pbs_config::drive::config()?;

    let data: ScsiTapeChanger = config.lookup("changer", &name)?;

@@ -176,7 +178,6 @@ pub fn update_changer(
    digest: Option<String>,
    _param: Value,
) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut config, expected_digest) = pbs_config::drive::config()?;

@@ -244,7 +245,6 @@ pub fn update_changer(
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut config, _digest) = pbs_config::drive::config()?;

@@ -252,18 +252,31 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
    match config.sections.get(&name) {
        Some((section_type, _)) => {
            if section_type != "changer" {
                param_bail!(
                    "name",
                    "Entry '{}' exists, but is not a changer device",
                    name
                );
            }
            config.sections.remove(&name);
        }
        None => http_bail!(
            NOT_FOUND,
            "Delete changer '{}' failed - no such entry",
            name
        ),
    }

    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
    for drive in drive_list {
        if let Some(changer) = drive.changer {
            if changer == name {
                param_bail!(
                    "name",
                    "Delete changer '{}' failed - used by drive '{}'",
                    name,
                    drive.name
                );
            }
        }
    }

@@ -278,7 +291,6 @@ const ITEM_ROUTER: Router = Router::new()
    .put(&API_METHOD_UPDATE_CHANGER)
    .delete(&API_METHOD_DELETE_CHANGER);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_CHANGERS)
    .post(&API_METHOD_CREATE_CHANGER)
@@ -1,31 +1,27 @@
use std::path::PathBuf;

use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, param_bail, ApiType};
use proxmox_section_config::SectionConfigData;
use proxmox_sys::WorkerTaskContext;

use pbs_api_types::{
    Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DATASTORE_SCHEMA,
    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
    PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::BackupLockGuard;
use pbs_datastore::chunk_store::ChunkStore;

use crate::api2::admin::{sync::list_sync_jobs, verify::list_verification_jobs};
use crate::api2::config::sync::delete_sync_job;
use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
use crate::api2::config::verify::delete_verification_job;

use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;

@@ -50,7 +46,6 @@ pub fn list_datastores(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {
    let (config, digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -58,7 +53,7 @@ pub fn list_datastores(
    rpcenv["digest"] = hex::encode(&digest).into();

    let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
    let filter_by_privs = |store: &DataStoreConfig| {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
        (user_privs & PRIV_DATASTORE_AUDIT) != 0

@@ -76,7 +71,13 @@ pub(crate) fn do_create_datastore(
    let path: PathBuf = datastore.path.clone().into();

    let backup_user = pbs_config::backup_user()?;
    let _store = ChunkStore::create(
        &datastore.name,
        path,
        backup_user.uid,
        backup_user.gid,
        worker,
    )?;

    config.set_data(&datastore.name, "datastore", &datastore)?;

@@ -107,7 +108,6 @@ pub fn create_datastore(
    config: DataStoreConfig,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let lock = pbs_config::datastore::lock_config()?;

    let (section_config, _digest) = pbs_config::datastore::config()?;

@@ -124,7 +124,7 @@ pub fn create_datastore(
        Some(config.name.to_string()),
        auth_id.to_string(),
        to_stdout,
        move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
    )
}

@@ -156,7 +156,7 @@ pub fn read_datastore(
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -226,7 +226,6 @@ pub fn update_datastore(
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = pbs_config::datastore::lock_config()?;

    // pass/compare digest
@@ -239,23 +238,51 @@ pub fn update_datastore(
    let mut data: DataStoreConfig = config.lookup("datastore", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => {
                    data.comment = None;
                }
                DeletableProperty::gc_schedule => {
                    data.gc_schedule = None;
                }
                DeletableProperty::prune_schedule => {
                    data.prune_schedule = None;
                }
                DeletableProperty::keep_last => {
                    data.keep_last = None;
                }
                DeletableProperty::keep_hourly => {
                    data.keep_hourly = None;
                }
                DeletableProperty::keep_daily => {
                    data.keep_daily = None;
                }
                DeletableProperty::keep_weekly => {
                    data.keep_weekly = None;
                }
                DeletableProperty::keep_monthly => {
                    data.keep_monthly = None;
                }
                DeletableProperty::keep_yearly => {
                    data.keep_yearly = None;
                }
                DeletableProperty::verify_new => {
                    data.verify_new = None;
                }
                DeletableProperty::notify => {
                    data.notify = None;
                }
                DeletableProperty::notify_user => {
                    data.notify_user = None;
                }
                DeletableProperty::tuning => {
                    data.tuning = None;
                }
                DeletableProperty::maintenance_mode => {
                    data.maintenance_mode = None;
                }
            }
        }
    }

@@ -281,29 +308,54 @@ pub fn update_datastore(
        data.prune_schedule = update.prune_schedule;
    }

    if update.keep_last.is_some() {
        data.keep_last = update.keep_last;
    }
    if update.keep_hourly.is_some() {
        data.keep_hourly = update.keep_hourly;
    }
    if update.keep_daily.is_some() {
        data.keep_daily = update.keep_daily;
    }
    if update.keep_weekly.is_some() {
        data.keep_weekly = update.keep_weekly;
    }
    if update.keep_monthly.is_some() {
        data.keep_monthly = update.keep_monthly;
    }
    if update.keep_yearly.is_some() {
        data.keep_yearly = update.keep_yearly;
    }

    if let Some(notify_str) = update.notify {
        let value = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str)?;
        let notify: DatastoreNotify = serde_json::from_value(value)?;
        if let DatastoreNotify {
            gc: None,
            verify: None,
            sync: None,
        } = notify
        {
            data.notify = None;
        } else {
            data.notify = Some(notify_str);
        }
    }
    if update.verify_new.is_some() {
        data.verify_new = update.verify_new;
    }

    if update.notify_user.is_some() {
        data.notify_user = update.notify_user;
    }

    if update.tuning.is_some() {
        data.tuning = update.tuning;
    }

    if update.maintenance_mode.is_some() {
        data.maintenance_mode = update.maintenance_mode;
    }

    config.set_data(&name, "datastore", &data)?;
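The notify handling shows a useful struct-destructuring idiom: matching every field against None in an if let is an exhaustive "nothing set" test that stops compiling as soon as a field is added to the struct. A minimal sketch with a hypothetical struct:

    struct Notify {
        gc: Option<bool>,
        verify: Option<bool>,
        sync: Option<bool>,
    }

    fn is_unset(n: &Notify) -> bool {
        // adding a field to Notify makes this pattern fail to compile until updated
        matches!(
            n,
            Notify {
                gc: None,
                verify: None,
                sync: None,
            }
        )
    }

    fn main() {
        assert!(is_unset(&Notify { gc: None, verify: None, sync: None }));
        assert!(!is_unset(&Notify { gc: Some(true), verify: None, sync: None }));
    }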
@@ -352,7 +404,6 @@ pub async fn delete_datastore(
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let _lock = pbs_config::datastore::lock_config()?;

    let (mut config, expected_digest) = pbs_config::datastore::config()?;

@@ -363,7 +414,9 @@ pub async fn delete_datastore(
    }

    match config.sections.get(&name) {
        Some(_) => {
            config.sections.remove(&name);
        }
        None => http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name),
    }

@@ -376,7 +429,10 @@ pub async fn delete_datastore(
    }

    let tape_jobs = list_tape_backup_jobs(Value::Null, rpcenv)?;
    for job_config in tape_jobs
        .into_iter()
        .filter(|config| config.setup.store == name)
    {
        delete_tape_backup_job(job_config.id, None, rpcenv)?;
    }
}
@ -1,18 +1,18 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{format_err, Error};
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT,
    PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_tape::linux_list_drives::{check_drive_path, lto_tape_device_list};

#[api(
    protected: true,

@ -30,7 +30,6 @@ use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
)]
/// Create a new drive
pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut section_config, _digest) = pbs_config::drive::config()?;

@ -46,7 +45,12 @@ pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
            param_bail!("name", "Entry '{}' already exists", config.name);
        }
        if drive.path == config.path {
            param_bail!(
                "path",
                "Path '{}' already used in drive '{}'",
                config.path,
                drive.name
            );
        }
    }

@ -78,7 +82,6 @@ pub fn get_config(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LtoTapeDrive, Error> {
    let (config, digest) = pbs_config::drive::config()?;

    let data: LtoTapeDrive = config.lookup("lto", &name)?;

@ -176,9 +179,8 @@ pub fn update_drive(
    update: LtoTapeDriveUpdater,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    _param: Value,
) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut config, expected_digest) = pbs_config::drive::config()?;

@ -196,8 +198,10 @@ pub fn update_drive(
                DeletableProperty::changer => {
                    data.changer = None;
                    data.changer_drivenum = None;
                }
                DeletableProperty::changer_drivenum => {
                    data.changer_drivenum = None;
                }
            }
        }
    }

@ -218,7 +222,10 @@ pub fn update_drive(
            data.changer_drivenum = None;
        } else {
            if data.changer.is_none() {
                param_bail!(
                    "changer",
                    format_err!("Option 'changer-drivenum' requires option 'changer'.")
                );
            }
            data.changer_drivenum = Some(changer_drivenum);
        }

@ -246,7 +253,6 @@ pub fn update_drive(
)]
/// Delete a drive configuration
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
    let _lock = pbs_config::drive::lock()?;

    let (mut config, _digest) = pbs_config::drive::config()?;

@ -254,10 +260,14 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
    match config.sections.get(&name) {
        Some((section_type, _)) => {
            if section_type != "lto" {
                param_bail!(
                    "name",
                    "Entry '{}' exists, but is not a lto tape drive",
                    name
                );
            }
            config.sections.remove(&name);
        }
        None => http_bail!(NOT_FOUND, "Delete drive '{}' failed - no such drive", name),
    }

@ -271,7 +281,6 @@ const ITEM_ROUTER: Router = Router::new()
    .put(&API_METHOD_UPDATE_DRIVE)
    .delete(&API_METHOD_DELETE_DRIVE);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DRIVES)
    .post(&API_METHOD_CREATE_DRIVE)
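create_drive rejects both name and path collisions before inserting the new section; the check has to walk every existing drive because the device path is not the section key. A hedged sketch of that uniqueness check, reusing convert_to_typed_array as the handlers in this commit do (the proxmox_section_config::SectionConfigData type path is an assumption):

use anyhow::Error;
use pbs_api_types::LtoTapeDrive;
use proxmox_schema::param_bail;
use proxmox_section_config::SectionConfigData; // assumed type path, see lead-in

// Sketch: fail with a parameter error if the name or device path is taken.
fn check_drive_is_unique(section_config: &SectionConfigData, config: &LtoTapeDrive) -> Result<(), Error> {
    let drive_list: Vec<LtoTapeDrive> = section_config.convert_to_typed_array("lto")?;
    for drive in drive_list {
        if drive.name == config.name {
            param_bail!("name", "Entry '{}' already exists", config.name);
        }
        if drive.path == config.path {
            param_bail!("path", "Path '{}' already used in drive '{}'", config.path, drive.name);
        }
    }
    Ok(())
}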

View File

@ -1,12 +1,12 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA, PRIV_TAPE_AUDIT,
    PRIV_TAPE_MODIFY,
};
use pbs_config::CachedUserInfo;

@ -26,10 +26,7 @@ use pbs_config::CachedUserInfo;
    },
)]
/// Create a new media pool
pub fn create_pool(config: MediaPoolConfig) -> Result<(), Error> {
    let _lock = pbs_config::media_pool::lock()?;

    let (mut section_config, _digest) = pbs_config::media_pool::config()?;

@ -59,9 +56,7 @@ pub fn create_pool(
    },
)]
/// List media pools
pub fn list_pools(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<MediaPoolConfig>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

@ -69,7 +64,7 @@ pub fn list_pools(
    let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;

    let list = list
        .into_iter()
        .filter(|pool| {
            let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);

@ -99,7 +94,6 @@ pub fn list_pools(
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
    let (config, _digest) = pbs_config::media_pool::config()?;

    let data: MediaPoolConfig = config.lookup("pool", &name)?;

@ -155,7 +149,6 @@ pub fn update_pool(
    update: MediaPoolConfigUpdater,
    delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {
    let _lock = pbs_config::media_pool::lock()?;

    let (mut config, _digest) = pbs_config::media_pool::config()?;

@ -165,19 +158,37 @@ pub fn update_pool(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::allocation => {
                    data.allocation = None;
                }
                DeletableProperty::retention => {
                    data.retention = None;
                }
                DeletableProperty::template => {
                    data.template = None;
                }
                DeletableProperty::encrypt => {
                    data.encrypt = None;
                }
                DeletableProperty::comment => {
                    data.comment = None;
                }
            }
        }
    }

    if update.allocation.is_some() {
        data.allocation = update.allocation;
    }
    if update.retention.is_some() {
        data.retention = update.retention;
    }
    if update.template.is_some() {
        data.template = update.template;
    }
    if update.encrypt.is_some() {
        data.encrypt = update.encrypt;
    }

    if let Some(comment) = update.comment {
        let comment = comment.trim();

@ -210,13 +221,14 @@ pub fn update_pool(
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {
    let _lock = pbs_config::media_pool::lock()?;

    let (mut config, _digest) = pbs_config::media_pool::config()?;

    match config.sections.get(&name) {
        Some(_) => {
            config.sections.remove(&name);
        }
        None => http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name),
    }

@ -230,7 +242,6 @@ const ITEM_ROUTER: Router = Router::new()
    .put(&API_METHOD_UPDATE_POOL)
    .delete(&API_METHOD_DELETE_POOL);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_POOLS)
    .post(&API_METHOD_CREATE_POOL)
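update_pool above is an instance of the delete-then-set idiom every updater in this commit uses: first clear the properties named in the optional delete list, then let any value carried by the updater struct win. A self-contained, hypothetical two-property version of the pattern (PoolData, PoolUpdater and the enum variants are illustration only, not types from this codebase):

use serde::{Deserialize, Serialize};

// Hypothetical config/updater pair, for illustration only.
#[derive(Default)]
struct PoolData {
    comment: Option<String>,
    retention: Option<String>,
}
struct PoolUpdater {
    comment: Option<String>,
    retention: Option<String>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
enum DeletableProperty {
    comment,
    retention,
}

fn apply_update(data: &mut PoolData, update: PoolUpdater, delete: Option<Vec<DeletableProperty>>) {
    // first pass: explicit deletions
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => data.comment = None,
                DeletableProperty::retention => data.retention = None,
            }
        }
    }
    // second pass: an update for the same property overrides its deletion
    if update.comment.is_some() {
        data.comment = update.comment;
    }
    if update.retention.is_some() {
        data.retention = update.retention;
    }
}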

View File

@ -1,20 +1,20 @@
//! Backup Server Configuration

use proxmox_router::list_subdirs_api_method;
use proxmox_router::{Router, SubdirMap};

pub mod access;
pub mod acme;
pub mod changer;
pub mod datastore;
pub mod drive;
pub mod media_pool;
pub mod remote;
pub mod sync;
pub mod tape_backup_job;
pub mod tape_encryption_keys;
pub mod traffic_control;
pub mod verify;

const SUBDIRS: SubdirMap = &[
    ("access", &access::ROUTER),

View File

@ -1,20 +1,20 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, format_err, Error};
use hex::FromHex;
use proxmox_router::list_subdirs_api_method;
use proxmox_router::SubdirMap;
use proxmox_sys::sortable;
use serde_json::Value;

use proxmox_router::{http_bail, http_err, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, DataStoreListItem, GroupListItem, RateLimitConfig, Remote, RemoteConfig,
    RemoteConfigUpdater, SyncJobConfig, DATASTORE_SCHEMA, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
    PROXMOX_CONFIG_DIGEST_SCHEMA, REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA,
};
use pbs_client::{HttpClient, HttpClientOptions};
use pbs_config::sync;

use pbs_config::CachedUserInfo;

@ -84,12 +84,7 @@ pub fn list_remotes(
    },
)]
/// Create new remote.
pub fn create_remote(name: String, config: RemoteConfig, password: String) -> Result<(), Error> {
    let _lock = pbs_config::remote::lock_config()?;

    let (mut section_config, _digest) = pbs_config::remote::config()?;

@ -98,7 +93,11 @@ pub fn create_remote(
        param_bail!("name", "remote '{}' already exists.", name);
    }

    let remote = Remote {
        name: name.clone(),
        config,
        password,
    };

    section_config.set_data(&name, "remote", &remote)?;

@ -188,7 +187,6 @@ pub fn update_remote(
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = pbs_config::remote::lock_config()?;

    let (mut config, expected_digest) = pbs_config::remote::config()?;

@ -203,9 +201,15 @@ pub fn update_remote(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::comment => {
                    data.config.comment = None;
                }
                DeletableProperty::fingerprint => {
                    data.config.fingerprint = None;
                }
                DeletableProperty::port => {
                    data.config.port = None;
                }
            }
        }
    }

@ -218,12 +222,22 @@ pub fn update_remote(
            data.config.comment = Some(comment);
        }
    }
    if let Some(host) = update.host {
        data.config.host = host;
    }
    if update.port.is_some() {
        data.config.port = update.port;
    }
    if let Some(auth_id) = update.auth_id {
        data.config.auth_id = auth_id;
    }
    if let Some(password) = password {
        data.password = password;
    }

    if update.fingerprint.is_some() {
        data.config.fingerprint = update.fingerprint;
    }

    config.set_data(&name, "remote", &data)?;

@ -251,13 +265,18 @@ pub fn update_remote(
)]
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
    let (sync_jobs, _) = sync::config()?;

    let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
    for job in job_list {
        if job.remote == name {
            param_bail!(
                "name",
                "remote '{}' is used by sync job '{}' (datastore '{}')",
                name,
                job.id,
                job.store
            );
        }
    }

@ -271,7 +290,9 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
    }

    match config.sections.get(&name) {
        Some(_) => {
            config.sections.remove(&name);
        }
        None => http_bail!(NOT_FOUND, "remote '{}' does not exist.", name),
    }

@ -285,7 +306,10 @@ pub async fn remote_client(
    remote: &Remote,
    limit: Option<RateLimitConfig>,
) -> Result<HttpClient, Error> {
    let mut options = HttpClientOptions::new_non_interactive(
        remote.password.clone(),
        remote.config.fingerprint.clone(),
    );

    if let Some(limit) = limit {
        options = options.rate_limit(limit);

@ -295,15 +319,22 @@ pub async fn remote_client(
        &remote.config.host,
        remote.config.port.unwrap_or(8007),
        &remote.config.auth_id,
        options,
    )?;
    let _auth_info = client
        .login() // make sure we can auth
        .await
        .map_err(|err| {
            format_err!(
                "remote connection to '{}' failed - {}",
                remote.config.host,
                err
            )
        })?;

    Ok(client)
}

#[api(
    input: {
        properties: {

@ -327,15 +358,15 @@ pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListIte
    let remote: Remote = remote_config.lookup("remote", &name)?;

    let map_remote_err = |api_err| {
        http_err!(
            INTERNAL_SERVER_ERROR,
            "failed to scan remote '{}' - {}",
            &name,
            api_err
        )
    };

    let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
    let api_res = client
        .get("api2/json/admin/datastore", None)
        .await

@ -377,15 +408,15 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
    let remote: Remote = remote_config.lookup("remote", &name)?;

    let map_remote_err = |api_err| {
        http_err!(
            INTERNAL_SERVER_ERROR,
            "failed to scan remote '{}' - {}",
            &name,
            api_err
        )
    };

    let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
    let api_res = client
        .get(&format!("api2/json/admin/datastore/{}/groups", store), None)
        .await

@ -402,13 +433,8 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
}

#[sortable]
const DATASTORE_SCAN_SUBDIRS: SubdirMap =
    &[("groups", &Router::new().get(&API_METHOD_SCAN_REMOTE_GROUPS))];

const DATASTORE_SCAN_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_SCAN_SUBDIRS))
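remote_client above wires a stored Remote into an authenticated HttpClient, optionally rate-limited, and does one login round-trip so bad credentials fail early instead of at first use. A hedged usage sketch; HttpClient::new is an assumption inferred from the host/port/auth-id/options call shown in this hunk:

use anyhow::{format_err, Error};
use pbs_api_types::Remote;
use pbs_client::{HttpClient, HttpClientOptions};

// Sketch: connect to a configured remote and verify the credentials work.
async fn connect(remote: &Remote) -> Result<HttpClient, Error> {
    let options = HttpClientOptions::new_non_interactive(
        remote.password.clone(),
        remote.config.fingerprint.clone(),
    );
    // assumed constructor shape, see lead-in
    let client = HttpClient::new(
        &remote.config.host,
        remote.config.port.unwrap_or(8007), // PBS default API port
        &remote.config.auth_id,
        options,
    )?;
    // a login round-trip proves password/fingerprint are usable
    client.login().await.map_err(|err| {
        format_err!("remote connection to '{}' failed - {}", remote.config.host, err)
    })?;
    Ok(client)
}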

View File

@ -1,15 +1,15 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
    PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
};

use pbs_config::sync;

@ -49,10 +49,8 @@ pub fn check_sync_job_modify_access(
    let correct_owner = match job.owner {
        Some(ref owner) => {
            owner == auth_id
                || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
        }
        // default sync owner
        None => auth_id == Authid::root_auth_id(),
    };

@ -98,7 +96,7 @@ pub fn list_sync_jobs(
        .into_iter()
        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
        .collect();

    Ok(list)
}

#[api(

@ -181,7 +179,7 @@ pub fn read_sync_job(

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {

@ -258,18 +256,36 @@ pub fn update_sync_job(
    let mut data: SyncJobConfig = config.lookup("sync", &id)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::owner => {
                    data.owner = None;
                }
                DeletableProperty::comment => {
                    data.comment = None;
                }
                DeletableProperty::schedule => {
                    data.schedule = None;
                }
                DeletableProperty::remove_vanished => {
                    data.remove_vanished = None;
                }
                DeletableProperty::group_filter => {
                    data.group_filter = None;
                }
                DeletableProperty::rate_in => {
                    data.limit.rate_in = None;
                }
                DeletableProperty::rate_out => {
                    data.limit.rate_out = None;
                }
                DeletableProperty::burst_in => {
                    data.limit.burst_in = None;
                }
                DeletableProperty::burst_out => {
                    data.limit.burst_out = None;
                }
            }
        }
    }

@ -283,11 +299,21 @@ pub fn update_sync_job(
        }
    }

    if let Some(store) = update.store {
        data.store = store;
    }
    if let Some(remote) = update.remote {
        data.remote = remote;
    }
    if let Some(remote_store) = update.remote_store {
        data.remote_store = remote_store;
    }
    if let Some(owner) = update.owner {
        data.owner = Some(owner);
    }
    if let Some(group_filter) = update.group_filter {
        data.group_filter = Some(group_filter);
    }

    if update.limit.rate_in.is_some() {
        data.limit.rate_in = update.limit.rate_in;

@ -306,8 +332,12 @@ pub fn update_sync_job(
    }

    let schedule_changed = data.schedule != update.schedule;
    if update.schedule.is_some() {
        data.schedule = update.schedule;
    }
    if update.remove_vanished.is_some() {
        data.remove_vanished = update.remove_vanished;
    }

    if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
        bail!("permission check failed");

@ -366,8 +396,10 @@ pub fn delete_sync_job(
                bail!("permission check failed");
            }
            config.sections.remove(&id);
        }
        Err(_) => {
            http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
        }
    };

    sync::save_config(&config)?;

@ -387,25 +419,30 @@ pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_CREATE_SYNC_JOB)
    .match_all("id", &ITEM_ROUTER);

#[test]
fn sync_job_access_test() -> Result<(), Error> {
    let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
        r###"
user: noperm@pbs
user: read@pbs
user: write@pbs
"###,
    )
    .expect("test user.cfg is not parsable");
    let acl_tree = pbs_config::acl::AclTree::from_raw(
        r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
"###,
    )
    .expect("test acl.cfg is not parsable");

    let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);

@ -429,28 +466,52 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
    };

    // should work without ACLs
    assert_eq!(
        check_sync_job_read_access(&user_info, root_auth_id, &job),
        true
    );
    assert_eq!(
        check_sync_job_modify_access(&user_info, root_auth_id, &job),
        true
    );

    // user without permissions must fail
    assert_eq!(
        check_sync_job_read_access(&user_info, &no_perm_auth_id, &job),
        false
    );
    assert_eq!(
        check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job),
        false
    );

    // reading without proper read permissions on either remote or local must fail
    assert_eq!(
        check_sync_job_read_access(&user_info, &read_auth_id, &job),
        false
    );

    // reading without proper read permissions on local end must fail
    job.remote = "remote1".to_string();
    assert_eq!(
        check_sync_job_read_access(&user_info, &read_auth_id, &job),
        false
    );

    // reading without proper read permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert_eq!(
        check_sync_job_read_access(&user_info, &read_auth_id, &job),
        false
    );

    // writing without proper write permissions on either end must fail
    job.store = "localstore0".to_string();
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        false
    );

    // writing without proper write permissions on local end must fail
    job.remote = "remote1".to_string();

@ -458,46 +519,85 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
    // writing without proper write permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        false
    );

    // reset remote to one where users have access
    job.remote = "remote1".to_string();

    // user with read permission can only read, but not modify/run
    assert_eq!(
        check_sync_job_read_access(&user_info, &read_auth_id, &job),
        true
    );
    job.owner = Some(read_auth_id.clone());
    assert_eq!(
        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
        false
    );
    job.owner = None;
    assert_eq!(
        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
        false
    );
    job.owner = Some(write_auth_id.clone());
    assert_eq!(
        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
        false
    );

    // user with simple write permission can modify/run
    assert_eq!(
        check_sync_job_read_access(&user_info, &write_auth_id, &job),
        true
    );
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        true
    );

    // but can't modify/run with deletion
    job.remove_vanished = Some(true);
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        false
    );

    // unless they have Datastore.Prune as well
    job.store = "localstore2".to_string();
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        true
    );

    // changing owner is not possible
    job.owner = Some(read_auth_id.clone());
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        false
    );

    // also not to the default 'root@pam'
    job.owner = None;
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        false
    );

    // unless they have Datastore.Modify as well
    job.store = "localstore3".to_string();
    job.owner = Some(read_auth_id);
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        true
    );
    job.owner = None;
    assert_eq!(
        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
        true
    );

    Ok(())
}
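The read/modify checks exercised by this test ultimately reduce to ACL privilege bitmasks, the same idiom the list endpoints in this commit use to filter results. A hedged sketch of that filter predicate, built only from calls visible in this diff:

use pbs_api_types::{Authid, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP};
use pbs_config::CachedUserInfo;

// Sketch: keep only entries the caller may at least audit or back up.
fn may_see(user_info: &CachedUserInfo, auth_id: &Authid, store: &str) -> bool {
    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP;
    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    privs & required_privs != 0 // any one of the required bits suffices
}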

View File

@ -1,15 +1,14 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    TrafficControlRule, TrafficControlRuleUpdater, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    PROXMOX_CONFIG_DIGEST_SCHEMA, TRAFFIC_CONTROL_ID_SCHEMA,
};

#[api(

@ -56,13 +55,16 @@ pub fn list_traffic_controls(
)]
/// Create new traffic control rule.
pub fn create_traffic_control(config: TrafficControlRule) -> Result<(), Error> {
    let _lock = pbs_config::traffic_control::lock_config()?;

    let (mut section_config, _digest) = pbs_config::traffic_control::config()?;

    if section_config.sections.get(&config.name).is_some() {
        param_bail!(
            "name",
            "traffic control rule '{}' already exists.",
            config.name
        );
    }

    section_config.set_data(&config.name, "rule", &config)?;

@ -154,7 +156,6 @@ pub fn update_traffic_control(
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = pbs_config::traffic_control::lock_config()?;

    let (mut config, expected_digest) = pbs_config::traffic_control::config()?;

@ -169,12 +170,24 @@ pub fn update_traffic_control(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::rate_in => {
                    data.limit.rate_in = None;
                }
                DeletableProperty::rate_out => {
                    data.limit.rate_out = None;
                }
                DeletableProperty::burst_in => {
                    data.limit.burst_in = None;
                }
                DeletableProperty::burst_out => {
                    data.limit.burst_out = None;
                }
                DeletableProperty::comment => {
                    data.comment = None;
                }
                DeletableProperty::timeframe => {
                    data.timeframe = None;
                }
            }
        }
    }

@ -204,8 +217,12 @@ pub fn update_traffic_control(
        data.limit.burst_out = update.limit.burst_out;
    }

    if let Some(network) = update.network {
        data.network = network;
    }
    if update.timeframe.is_some() {
        data.timeframe = update.timeframe;
    }

    config.set_data(&name, "rule", &data)?;

@ -233,7 +250,6 @@ pub fn update_traffic_control(
)]
/// Remove a traffic control rule from the configuration file.
pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<(), Error> {
    let _lock = pbs_config::traffic_control::lock_config()?;

    let (mut config, expected_digest) = pbs_config::traffic_control::config()?;

@ -244,7 +260,9 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
    }

    match config.sections.get(&name) {
        Some(_) => {
            config.sections.remove(&name);
        }
        None => http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name),
    }

@ -253,7 +271,6 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_TRAFFIC_CONTROL)
    .put(&API_METHOD_UPDATE_TRAFFIC_CONTROL)

View File

@ -1,14 +1,14 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, VerificationJobConfig, VerificationJobConfigUpdater, JOB_ID_SCHEMA,
    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};

use pbs_config::verify;

@ -42,19 +42,20 @@ pub fn list_verification_jobs(
    let list = config.convert_to_typed_array("verification")?;

    let list = list
        .into_iter()
        .filter(|job: &VerificationJobConfig| {
            let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
            privs & required_privs != 0
        })
        .collect();

    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {

@ -73,12 +74,17 @@ pub fn list_verification_jobs(
/// Create a new verification job.
pub fn create_verification_job(
    config: VerificationJobConfig,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    user_info.check_privs(
        &auth_id,
        &["datastore", &config.store],
        PRIV_DATASTORE_VERIFY,
        false,
    )?;

    let _lock = verify::lock_config()?;

@ -124,7 +130,12 @@ pub fn read_verification_job(
    let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;

    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
    user_info.check_privs(
        &auth_id,
        &["datastore", &verification_job.store],
        required_privs,
        true,
    )?;

    rpcenv["digest"] = hex::encode(&digest).into();

@ -133,7 +144,7 @@ pub fn read_verification_job(

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the ignore verified property.

@ -143,7 +154,7 @@ pub enum DeletableProperty {
    /// Delete the job schedule.
    Schedule,
    /// Delete outdated after property.
    OutdatedAfter,
}

#[api(

@ -201,15 +212,28 @@ pub fn update_verification_job(
    let mut data: VerificationJobConfig = config.lookup("verification", &id)?;

    // check existing store
    user_info.check_privs(
        &auth_id,
        &["datastore", &data.store],
        PRIV_DATASTORE_VERIFY,
        true,
    )?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::IgnoreVerified => {
                    data.ignore_verified = None;
                }
                DeletableProperty::OutdatedAfter => {
                    data.outdated_after = None;
                }
                DeletableProperty::Comment => {
                    data.comment = None;
                }
                DeletableProperty::Schedule => {
                    data.schedule = None;
                }
            }
        }
    }

@ -225,15 +249,25 @@ pub fn update_verification_job(
    if let Some(store) = update.store {
        // check new store
        user_info.check_privs(
            &auth_id,
            &["datastore", &store],
            PRIV_DATASTORE_VERIFY,
            true,
        )?;
        data.store = store;
    }

    if update.ignore_verified.is_some() {
        data.ignore_verified = update.ignore_verified;
    }
    if update.outdated_after.is_some() {
        data.outdated_after = update.outdated_after;
    }
    let schedule_changed = data.schedule != update.schedule;
    if update.schedule.is_some() {
        data.schedule = update.schedule;
    }

    config.set_data(&id, "verification", &data)?;

@ -278,7 +312,12 @@ pub fn delete_verification_job(
    let (mut config, expected_digest) = verify::config()?;

    let job: VerificationJobConfig = config.lookup("verification", &id)?;
    user_info.check_privs(
        &auth_id,
        &["datastore", &job.store],
        PRIV_DATASTORE_VERIFY,
        true,
    )?;

    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;

@ -286,7 +325,9 @@ pub fn delete_verification_job(
    }

    match config.sections.get(&id) {
        Some(_) => {
            config.sections.remove(&id);
        }
        None => http_bail!(NOT_FOUND, "job '{}' does not exist.", id),
    }
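Every verification handler re-checks Datastore.Verify on the concrete store it touches; updates even check both the old and the new store. A hedged sketch of that gate, with the signature taken from the calls in this hunk (the meaning of the trailing boolean is not asserted here, it simply mirrors the handlers above):

use anyhow::Error;
use pbs_api_types::{Authid, PRIV_DATASTORE_VERIFY};
use pbs_config::CachedUserInfo;

// Sketch: refuse to act on a store the caller may not verify.
fn require_verify_priv(auth_id: &Authid, store: &str) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    // trailing flag copied from the calls above
    user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_VERIFY, true)?;
    Ok(())
}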

View File

@ -2,7 +2,7 @@ use std::path::PathBuf;

use anyhow::Error;
use futures::stream::TryStreamExt;
use hyper::{header, Body, Response, StatusCode};

use proxmox_router::http_bail;

View File

@ -4,15 +4,15 @@ pub mod access;
pub mod admin;
pub mod backup;
pub mod config;
pub mod helpers;
pub mod node;
pub mod ping;
pub mod pull;
pub mod reader;
pub mod status;
pub mod tape;
pub mod types;
pub mod version;

use proxmox_router::{list_subdirs_api_method, Router, SubdirMap};

View File

@ -1,12 +1,12 @@
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;

use proxmox_router::{
    list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::api;
use proxmox_sys::fs::{replace_file, CreateOptions};

use proxmox_apt::repositories::{
    APTRepositoryFile, APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo,

@ -15,17 +15,13 @@ use proxmox_apt::repositories::{
use proxmox_http::ProxyConfig;

use pbs_api_types::{
    APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
    UPID_SCHEMA,
};

use crate::config::node;
use crate::tools::{apt, pbs_simple_http, subscription};
use proxmox_rest_server::WorkerTask;

#[api(
    input: {

@ -49,7 +45,6 @@ use crate::tools::{
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
    if let Ok(false) = apt::pkg_cache_expired() {
        if let Ok(Some(cache)) = apt::read_pkg_state() {
            return Ok(json!(cache.package_status));

@ -62,7 +57,6 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
}

pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {
    const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE

    if let Some(proxy_config) = proxy_config {

@ -90,7 +84,9 @@ fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
}

fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    if !quiet {
        worker.log_message("starting apt-get update")
    }

    read_and_update_proxy_config()?;

@ -98,7 +94,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    command.arg("update");

    // apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now.
    let output = command
        .output()
        .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;

    if !quiet {

@ -109,7 +106,13 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    if !output.status.success() {
        if output.status.code().is_some() {
            let msg = String::from_utf8(output.stderr)
                .map(|m| {
                    if m.is_empty() {
                        String::from("no error message")
                    } else {
                        m
                    }
                })
                .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
            worker.log_warning(msg);
        } else {

@ -154,7 +157,6 @@ pub fn apt_update_database(
    quiet: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id = rpcenv.get_auth_id().unwrap();
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

@ -176,7 +178,7 @@ pub fn apt_update_database(
                    if notified_version != pkg.version {
                        to_notify.push(pkg);
                    }
                }
                None => to_notify.push(pkg),
            }
        }

@ -220,19 +222,17 @@ pub fn apt_update_database(
    },
)]
/// Retrieve the changelog of the specified package.
fn apt_get_changelog(param: Value) -> Result<Value, Error> {
    let name = pbs_tools::json::required_string_param(&param, "name")?.to_owned();

    let version = param["version"].as_str();

    let pkg_info = apt::list_installed_apt_packages(
        |data| match version {
            Some(version) => version == data.active_version,
            None => data.active_version == data.candidate_version,
        },
        Some(&name),
    );

    if pkg_info.is_empty() {
        bail!("Package '{}' not found", name);

@ -245,33 +245,47 @@ fn apt_get_changelog(
    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
    if changelog_url.starts_with("http://download.proxmox.com/") {
        let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, None))
            .map_err(|err| {
                format_err!(
                    "Error downloading changelog from '{}': {}",
                    changelog_url,
                    err
                )
            })?;
        Ok(json!(changelog))
    } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
        let sub = match subscription::read_subscription()? {
            Some(sub) => sub,
            None => {
                bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
            }
        };
        let (key, id) = match sub.key {
            Some(key) => match sub.serverid {
                Some(id) => (key, id),
                None => bail!("cannot retrieve changelog from enterprise repo: no server id found"),
            },
            None => {
                bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
            }
        };

        let mut auth_header = HashMap::new();
        auth_header.insert(
            "Authorization".to_owned(),
            format!("Basic {}", base64::encode(format!("{}:{}", key, id))),
        );

        let changelog =
            proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
                .map_err(|err| {
                    format_err!(
                        "Error downloading changelog from '{}': {}",
                        changelog_url,
                        err
                    )
                })?;
        Ok(json!(changelog))
    } else {
        let mut command = std::process::Command::new("apt-get");
        command.arg("changelog");

@ -348,23 +362,35 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
        "running kernel: {}",
        nix::sys::utsname::uname().release().to_owned()
    );
    if let Some(proxmox_backup) = pbs_packages
        .iter()
        .find(|pkg| pkg.package == "proxmox-backup")
    {
        let mut proxmox_backup = proxmox_backup.clone();
        proxmox_backup.extra_info = Some(running_kernel);
        packages.push(proxmox_backup);
    } else {
        packages.push(unknown_package(
            "proxmox-backup".into(),
            Some(running_kernel),
        ));
    }

    let version = pbs_buildcfg::PROXMOX_PKG_VERSION;
    let release = pbs_buildcfg::PROXMOX_PKG_RELEASE;
    let daemon_version_info = Some(format!("running version: {}.{}", version, release));
    if let Some(pkg) = pbs_packages
        .iter()
        .find(|pkg| pkg.package == "proxmox-backup-server")
    {
        let mut pkg = pkg.clone();
        pkg.extra_info = daemon_version_info;
        packages.push(pkg);
    } else {
        packages.push(unknown_package(
            "proxmox-backup".into(),
            daemon_version_info,
        ));
    }

    let mut kernel_pkgs: Vec<APTUpdateInfo> = pbs_packages

@ -609,15 +635,22 @@ pub fn change_repository(
}

const SUBDIRS: SubdirMap = &[
    (
        "changelog",
        &Router::new().get(&API_METHOD_APT_GET_CHANGELOG),
    ),
    (
        "repositories",
        &Router::new()
            .get(&API_METHOD_GET_REPOSITORIES)
            .post(&API_METHOD_CHANGE_REPOSITORY)
            .put(&API_METHOD_ADD_REPOSITORY),
    ),
    (
        "update",
        &Router::new()
            .get(&API_METHOD_APT_UPDATE_AVAILABLE)
            .post(&API_METHOD_APT_UPDATE_DATABASE),
    ),
    ("versions", &Router::new().get(&API_METHOD_GET_VERSIONS)),
];
@@ -7,9 +7,9 @@ use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};

use proxmox_router::SubdirMap;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_router::list_subdirs_api_method;
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn};
@@ -305,7 +305,10 @@ async fn order_certificate(
    };

    if domains.is_empty() {
        task_log!(
            worker,
            "No domains configured to be ordered from an ACME server."
        );
        return Ok(None);
    }
@@ -363,7 +366,9 @@ async fn order_certificate(
            task_warn!(
                worker,
                "Failed to teardown plugin '{}' for domain '{}' - {}",
                plugin_id,
                domain,
                err
            );
        }
@@ -453,7 +458,10 @@ async fn request_validation(
        let auth = acme.get_authorization(auth_url).await?;
        match auth.status {
            Status::Pending => {
                task_log!(
                    worker,
                    "Status is still 'pending', trying again in 10 seconds"
                );
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
            Status::Valid => return Ok(()),
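The arm above is part of a simple poll-and-back-off loop. Reduced to its shape, with a hypothetical `fetch_status` standing in for the real ACME client call:

use std::time::Duration;

enum Status {
    Pending,
    Valid,
    Invalid,
}

// Poll until the authorization leaves 'pending', sleeping 10s between tries.
async fn wait_until_valid(mut fetch_status: impl FnMut() -> Status) -> Result<(), anyhow::Error> {
    loop {
        match fetch_status() {
            Status::Pending => tokio::time::sleep(Duration::from_secs(10)).await,
            Status::Valid => return Ok(()),
            Status::Invalid => anyhow::bail!("authorization failed"),
        }
    }
}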
@@ -574,7 +582,10 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error
            let mut acme = node_config.acme_client().await?;
            task_log!(worker, "Revoking old certificate");
            acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
            task_log!(
                worker,
                "Deleting certificate and regenerating a self-signed one"
            );
            delete_custom_certificate().await?;
            Ok(())
        },
@@ -1,5 +1,5 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;

use proxmox_router::{Permission, Router, RpcEnvironment};
@@ -36,7 +36,7 @@ pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -57,10 +57,10 @@ pub enum DeletableProperty {
    /// Delete the email-from property.
    email_from,
    /// Delete the ciphers-tls-1.3 property.
    #[serde(rename = "ciphers-tls-1.3")]
    ciphers_tls_1_3,
    /// Delete the ciphers-tls-1.2 property.
    #[serde(rename = "ciphers-tls-1.2")]
    ciphers_tls_1_2,
    /// Delete the default-lang property.
    default_lang,
@@ -117,36 +117,88 @@ pub fn update_node_config(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::acme => {
                    config.acme = None;
                }
                DeletableProperty::acmedomain0 => {
                    config.acmedomain0 = None;
                }
                DeletableProperty::acmedomain1 => {
                    config.acmedomain1 = None;
                }
                DeletableProperty::acmedomain2 => {
                    config.acmedomain2 = None;
                }
                DeletableProperty::acmedomain3 => {
                    config.acmedomain3 = None;
                }
                DeletableProperty::acmedomain4 => {
                    config.acmedomain4 = None;
                }
                DeletableProperty::http_proxy => {
                    config.http_proxy = None;
                }
                DeletableProperty::email_from => {
                    config.email_from = None;
                }
                DeletableProperty::ciphers_tls_1_3 => {
                    config.ciphers_tls_1_3 = None;
                }
                DeletableProperty::ciphers_tls_1_2 => {
                    config.ciphers_tls_1_2 = None;
                }
                DeletableProperty::default_lang => {
                    config.default_lang = None;
                }
                DeletableProperty::description => {
                    config.description = None;
                }
                DeletableProperty::task_log_max_days => {
                    config.task_log_max_days = None;
                }
            }
        }
    }

    if update.acme.is_some() {
        config.acme = update.acme;
    }
    if update.acmedomain0.is_some() {
        config.acmedomain0 = update.acmedomain0;
    }
    if update.acmedomain1.is_some() {
        config.acmedomain1 = update.acmedomain1;
    }
    if update.acmedomain2.is_some() {
        config.acmedomain2 = update.acmedomain2;
    }
    if update.acmedomain3.is_some() {
        config.acmedomain3 = update.acmedomain3;
    }
    if update.acmedomain4.is_some() {
        config.acmedomain4 = update.acmedomain4;
    }
    if update.http_proxy.is_some() {
        config.http_proxy = update.http_proxy;
    }
    if update.email_from.is_some() {
        config.email_from = update.email_from;
    }
    if update.ciphers_tls_1_3.is_some() {
        config.ciphers_tls_1_3 = update.ciphers_tls_1_3;
    }
    if update.ciphers_tls_1_2.is_some() {
        config.ciphers_tls_1_2 = update.ciphers_tls_1_2;
    }
    if update.default_lang.is_some() {
        config.default_lang = update.default_lang;
    }
    if update.description.is_some() {
        config.description = update.description;
    }
    if update.task_log_max_days.is_some() {
        config.task_log_max_days = update.task_log_max_days;
    }

    crate::config::node::save_config(&config)?;
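The long run of `is_some()` checks that rustfmt expands above is purely mechanical; the same Option-merge could be written once as a small macro. A sketch only, the real code deliberately spells each field out:

// Hypothetical helper: move every `Some` field of an update struct into
// the corresponding config field, leaving `None` fields untouched.
macro_rules! merge_opt {
    ($config:ident, $update:ident, $($field:ident),+ $(,)?) => {
        $(
            if $update.$field.is_some() {
                $config.$field = $update.$field;
            }
        )+
    };
}

struct Config { acme: Option<String>, http_proxy: Option<String> }
struct Update { acme: Option<String>, http_proxy: Option<String> }

fn apply(config: &mut Config, update: Update) {
    merge_opt!(config, update, acme, http_proxy);
}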
@@ -1,20 +1,20 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use serde_json::json;

use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_section_config::SectionConfigData;
use proxmox_sys::task_log;

use pbs_api_types::{
    DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT,
    PRIV_SYS_MODIFY, UPID_SCHEMA,
};

use crate::tools::disks::{
    create_file_system, create_single_linux_partition, get_disk_usage_info, get_fs_uuid,
    DiskManage, DiskUsageType, FileSystemType,
};

use crate::tools::systemd::{self, types::*};
@@ -31,7 +31,7 @@ const BASE_MOUNT_DIR: &str = "/mnt/datastore/";
    },
)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Datastore mount info.
pub struct DatastoreMountInfo {
    /// The path of the mount unit.
@@ -69,8 +69,7 @@ pub struct DatastoreMountInfo {
    },
)]
/// List systemd datastore mount units.
pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
    lazy_static::lazy_static! {
        static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
    }
@@ -144,7 +143,6 @@ pub fn create_datastore_disk(
    filesystem: Option<FileSystemType>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let auth_id = rpcenv.get_auth_id().unwrap();
@@ -161,15 +159,18 @@ pub fn create_datastore_disk(
    let default_path = std::path::PathBuf::from(&mount_point);

    match std::fs::metadata(&default_path) {
        Err(_) => {} // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate",
        Some(name.clone()),
        auth_id,
        to_stdout,
        move |worker| {
            task_log!(worker, "create datastore '{}' on disk {}", name, disk);

            let add_datastore = add_datastore.unwrap_or(false);
@@ -185,7 +186,8 @@ pub fn create_datastore_disk(
            let uuid = get_fs_uuid(&partition)?;
            let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);

            let mount_unit_name =
                create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;

            crate::tools::systemd::reload_daemon()?;
            crate::tools::systemd::enable_unit(&mount_unit_name)?;
@@ -202,11 +204,17 @@ pub fn create_datastore_disk(
                    bail!("datastore '{}' already exists.", datastore.name);
                }

                crate::api2::config::datastore::do_create_datastore(
                    lock,
                    config,
                    datastore,
                    Some(&worker),
                )?;
            }

            Ok(())
        },
    )?;

    Ok(upid_str)
}
@@ -229,17 +237,19 @@ pub fn create_datastore_disk(
)]
/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.".
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
    let path = format!("{}{}", BASE_MOUNT_DIR, name);
    // path of datastore cannot be changed
    let (config, _) = pbs_config::datastore::config()?;
    let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
    let conflicting_datastore: Option<DataStoreConfig> =
        datastores.into_iter().find(|ds| ds.path == path);

    if let Some(conflicting_datastore) = conflicting_datastore {
        bail!(
            "Can't remove '{}' since it's required by datastore '{}'",
            conflicting_datastore.path,
            conflicting_datastore.name
        );
    }

    // disable systemd mount-unit
@@ -262,33 +272,33 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
            until the next reboot or until unmounted manually!",
            path
        ),
        Ok(_) => Ok(()),
    }
}

const ITEM_ROUTER: Router = Router::new().delete(&API_METHOD_DELETE_DATASTORE_DISK);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
    .post(&API_METHOD_CREATE_DATASTORE_DISK)
    .match_all("name", &ITEM_ROUTER);

fn create_datastore_mount_unit(
    datastore_name: &str,
    mount_point: &str,
    fs_type: FileSystemType,
    what: &str,
) -> Result<String, Error> {
    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
    mount_unit_name.push_str(".mount");
    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);

    let unit = SystemdUnitSection {
        Description: format!(
            "Mount datatstore '{}' under '{}'",
            datastore_name, mount_point
        ),
        ..Default::default()
    };
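create_datastore_mount_unit leans on systemd's unit-name escaping. Roughly, and only as an illustration (proxmox_sys::systemd::escape_unit handles more edge cases than this), the scheme turns a mount path into a unit name like so:

// "/mnt/datastore/store1" -> "mnt-datastore-store1.mount"
fn escape_mount_path(path: &str) -> String {
    let mut out = String::new();
    for &b in path.trim_matches('/').as_bytes() {
        match b {
            b'/' => out.push('-'),
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'.' | b'_' => out.push(b as char),
            // anything else becomes a \xNN hex escape
            _ => out.push_str(&format!("\\x{:02x}", b)),
        }
    }
    out + ".mount"
}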
@@ -1,25 +1,22 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::task_log;

use pbs_api_types::{
    DataStoreConfig, ZfsCompressionType, ZfsRaidLevel, ZpoolListItem, DATASTORE_SCHEMA,
    DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
    ZFS_ASHIFT_SCHEMA, ZPOOL_NAME_SCHEMA,
};

use crate::tools::disks::{
    parse_zpool_status_config_tree, vdev_list_to_tree, zpool_list, zpool_status, DiskUsageType,
};

use proxmox_rest_server::WorkerTask;

#[api(
    protected: true,
    input: {
@@ -42,7 +39,6 @@ use proxmox_rest_server::WorkerTask;
)]
/// List zfs pools.
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
    let data = zpool_list(None, false)?;

    let mut list = Vec::new();
@@ -87,15 +83,12 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
    },
)]
/// Get zpool status details.
pub fn zpool_details(name: String) -> Result<Value, Error> {
    let key_value_list = zpool_status(&name)?;

    let config = match key_value_list.iter().find(|(k, _)| k == "config") {
        Some((_, v)) => v,
        None => bail!("got zpool status without config key"),
    };

    let vdev_list = parse_zpool_status_config_tree(config)?;
@@ -107,11 +100,12 @@ pub fn zpool_details(
        }
    }

    tree["name"] = tree
        .as_object_mut()
        .unwrap()
        .remove("pool")
        .unwrap_or_else(|| name.into());

    Ok(tree)
}
@@ -163,7 +157,6 @@ pub fn create_zpool(
    add_datastore: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let auth_id = rpcenv.get_auth_id().unwrap();
@@ -174,8 +167,12 @@ pub fn create_zpool(
    let devices_text = devices.clone();
    let devices = DISK_ARRAY_SCHEMA.parse_property_string(&devices)?;
    let devices: Vec<String> = devices
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap().to_string())
        .collect();

    let disk_map = crate::tools::disks::get_disks(None, true)?;
    for disk in devices.iter() {
@@ -220,20 +217,35 @@ pub fn create_zpool(
    let default_path = std::path::PathBuf::from(&mount_point);

    match std::fs::metadata(&default_path) {
        Err(_) => {} // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    let upid_str = WorkerTask::new_thread(
        "zfscreate",
        Some(name.clone()),
        auth_id,
        to_stdout,
        move |worker| {
            task_log!(
                worker,
                "create {:?} zpool '{}' on devices '{}'",
                raidlevel,
                name,
                devices_text
            );

            let mut command = std::process::Command::new("zpool");
            command.args(&[
                "create",
                "-o",
                &format!("ashift={}", ashift),
                "-m",
                &mount_point,
                &name,
            ]);
@@ -244,10 +256,10 @@ pub fn create_zpool(
            match raidlevel {
                ZfsRaidLevel::Single => {
                    command.args(devices);
                }
                ZfsRaidLevel::Raid10 => {
                    devices.chunks(2).for_each(|pair| {
                        command.arg("mirror");
                        command.args(pair);
                    });
                }
                ZfsRaidLevel::RaidZ => {
                    command.arg("raidz");
@@ -269,7 +281,10 @@ pub fn create_zpool(
            task_log!(worker, "{}", output);

            if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
                let import_unit = format!(
                    "zfs-import@{}.service",
                    proxmox_sys::systemd::escape_unit(&name, false)
                );
                crate::tools::systemd::enable_unit(&import_unit)?;
            }
@@ -294,17 +309,22 @@ pub fn create_zpool(
                    bail!("datastore '{}' already exists.", datastore.name);
                }

                crate::api2::config::datastore::do_create_datastore(
                    lock,
                    config,
                    datastore,
                    Some(&worker),
                )?;
            }

            Ok(())
        },
    )?;

    Ok(upid_str)
}
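The Raid10 arm above builds the zpool argument list by consuming the device list in pairs, each pair prefixed with the mirror keyword. The same construction in isolation, returning the args instead of spawning the command:

// Argument list for `zpool create` with a RAID10 layout; mirrors the
// worker code above.
fn zpool_raid10_args(name: &str, mount_point: &str, ashift: usize, devices: &[String]) -> Vec<String> {
    let mut args = vec![
        "create".to_string(),
        "-o".to_string(),
        format!("ashift={}", ashift),
        "-m".to_string(),
        mount_point.to_string(),
        name.to_string(),
    ];
    for pair in devices.chunks(2) {
        args.push("mirror".to_string());
        args.extend(pair.iter().cloned());
    }
    args
}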
pub const POOL_ROUTER: Router = Router::new().get(&API_METHOD_ZPOOL_DETAILS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
@@ -1,21 +1,21 @@
use std::sync::{Arc, Mutex};

use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};

use pbs_api_types::{IPRE, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};

use pbs_api_types::{
    FIRST_DNS_SERVER_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    PROXMOX_CONFIG_DIGEST_SCHEMA, SEARCH_DOMAIN_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
    THIRD_DNS_SERVER_SCHEMA,
};

static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
@@ -34,7 +34,6 @@ pub enum DeletableProperty {
}

pub fn read_etc_resolv_conf() -> Result<Value, Error> {
    let mut result = json!({});

    let mut nscount = 0;
@@ -47,24 +46,27 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
    lazy_static! {
        static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
        static ref SERVER_REGEX: Regex =
            Regex::new(concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
    }

    let mut options = String::new();

    for line in data.lines() {
        if let Some(caps) = DOMAIN_REGEX.captures(line) {
            result["search"] = Value::from(&caps[1]);
        } else if let Some(caps) = SERVER_REGEX.captures(line) {
            nscount += 1;
            if nscount > 3 {
                continue;
            };
            let nameserver = &caps[1];
            let id = format!("dns{}", nscount);
            result[id] = Value::from(nameserver);
        } else {
            if !options.is_empty() {
                options.push('\n');
            }
            options.push_str(line);
        }
    }
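The classifier above sorts each resolv.conf line into one of three buckets: a search/domain entry, one of at most three nameservers, or raw option text. The same logic, self-contained, with a simplified address pattern in place of the full IPRE! macro:

use regex::Regex;

fn parse_resolv_conf(data: &str) -> (Option<String>, Vec<String>, String) {
    let domain_re = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
    let server_re = Regex::new(r"^\s*nameserver\s+(\S+)\s*").unwrap();

    let mut search = None;
    let mut nameservers = Vec::new();
    let mut options = String::new();

    for line in data.lines() {
        if let Some(caps) = domain_re.captures(line) {
            search = Some(caps[1].to_string());
        } else if let Some(caps) = server_re.captures(line) {
            // keep at most three nameservers, like the code above
            if nameservers.len() < 3 {
                nameservers.push(caps[1].to_string());
            }
        } else {
            if !options.is_empty() {
                options.push('\n');
            }
            options.push_str(line);
        }
    }
    (search, nameservers, options)
}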
@@ -127,7 +129,6 @@ pub fn update_dns(
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<Value, Error> {
    lazy_static! {
        static ref MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
    }
@@ -145,17 +146,31 @@ pub fn update_dns(
        for delete_prop in delete {
            let config = config.as_object_mut().unwrap();
            match delete_prop {
                DeletableProperty::dns1 => {
                    config.remove("dns1");
                }
                DeletableProperty::dns2 => {
                    config.remove("dns2");
                }
                DeletableProperty::dns3 => {
                    config.remove("dns3");
                }
            }
        }
    }

    if let Some(search) = search {
        config["search"] = search.into();
    }
    if let Some(dns1) = dns1 {
        config["dns1"] = dns1.into();
    }
    if let Some(dns2) = dns2 {
        config["dns2"] = dns2.into();
    }
    if let Some(dns3) = dns3 {
        config["dns3"] = dns3.into();
    }

    let mut data = String::new();
@@ -219,7 +234,6 @@ pub fn get_dns(
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    read_etc_resolv_conf()
}
@@ -1,10 +1,10 @@
use std::process::{Command, Stdio};

use anyhow::Error;
use serde_json::{json, Value};
use std::io::{BufRead, BufReader};

use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;

use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
@@ -69,7 +69,6 @@ fn get_journal(
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let mut args = vec![];

    if let Some(lastentries) = lastentries {
@@ -127,5 +126,4 @@ fn get_journal(
    Ok(json!(lines))
}

pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_JOURNAL);
@@ -12,23 +12,23 @@ use hyper::Request;
use serde_json::{json, Value};
use tokio::io::{AsyncBufReadExt, BufReader};

use proxmox_sys::fd::fd_change_cloexec;
use proxmox_sys::sortable;

use proxmox_http::websocket::WebSocket;
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;

use proxmox_rest_server::WorkerTask;

use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_CONSOLE};
use pbs_tools::ticket::{self, Empty, Ticket};

use crate::auth_helpers::private_auth_key;
use crate::tools;

pub mod apt;
pub mod certificates;
@@ -303,7 +303,7 @@ fn upgrade_to_websocket(
        .map_err(Error::from)
        .await
    {
        Ok(upgraded) => upgraded,
        _ => bail!("error"),
    };
@@ -1,16 +1,16 @@
use anyhow::{bail, Error};
use hex::FromHex;
use serde::{Deserialize, Serialize};
use serde_json::{to_value, Value};

use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;

use pbs_api_types::{
    Authid, BondXmitHashPolicy, Interface, LinuxBondMode, NetworkConfigMethod,
    NetworkInterfaceType, CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA,
    NETWORK_INTERFACE_ARRAY_SCHEMA, NETWORK_INTERFACE_LIST_SCHEMA, NETWORK_INTERFACE_NAME_SCHEMA,
    NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};

use pbs_config::network::{self, NetworkConfig};
@@ -18,41 +18,57 @@ use proxmox_rest_server::WorkerTask;
fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
    Ok(value
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap().to_string())
        .collect())
}

fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
    let current_gateway_v4 = config
        .interfaces
        .iter()
        .find(|(_, interface)| interface.gateway.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v4) = current_gateway_v4 {
        if current_gateway_v4 != iface {
            bail!(
                "Default IPv4 gateway already exists on interface '{}'",
                current_gateway_v4
            );
        }
    }
    Ok(())
}

fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
    let current_gateway_v6 = config
        .interfaces
        .iter()
        .find(|(_, interface)| interface.gateway6.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v6) = current_gateway_v6 {
        if current_gateway_v6 != iface {
            bail!(
                "Default IPv6 gateway already exists on interface '{}'",
                current_gateway_v6
            );
        }
    }
    Ok(())
}
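Both guards enforce the same invariant: at most one interface in the config may own a default gateway, and only that owner may be updated. Stripped of the Interface type, the check reduces to this sketch:

use std::collections::BTreeMap;

// `interfaces` maps an interface name to its (optional) default gateway.
fn check_single_gateway_owner(
    interfaces: &BTreeMap<String, Option<String>>,
    iface: &str,
) -> Result<(), anyhow::Error> {
    let owner = interfaces
        .iter()
        .find(|(_, gateway)| gateway.is_some())
        .map(|(name, _)| name.clone());

    if let Some(owner) = owner {
        if owner != iface {
            anyhow::bail!("Default gateway already exists on interface '{}'", owner);
        }
    }
    Ok(())
}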
fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Error> {
    if iface.interface_type != NetworkInterfaceType::Bridge {
        bail!(
            "interface '{}' is no bridge (type is {:?})",
            iface.name,
            iface.interface_type
        );
    }
    iface.bridge_ports = Some(ports);
    Ok(())
@@ -60,7 +76,11 @@ fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Err
fn set_bond_slaves(iface: &mut Interface, slaves: Vec<String>) -> Result<(), Error> {
    if iface.interface_type != NetworkInterfaceType::Bond {
        bail!(
            "interface '{}' is no bond (type is {:?})",
            iface.name,
            iface.interface_type
        );
    }
    iface.slaves = Some(slaves);
    Ok(())
@@ -91,14 +111,15 @@ pub fn list_network_devices(
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let (config, digest) = network::config()?;
    let digest = hex::encode(&digest);

    let mut list = Vec::new();

    for (iface, interface) in config.interfaces.iter() {
        if iface == "lo" {
            continue;
        } // do not list lo
        let mut item: Value = to_value(interface)?;
        item["digest"] = digest.clone().into();
        item["iface"] = iface.to_string().into();
@@ -131,7 +152,6 @@ pub fn list_network_devices(
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {
    let (config, digest) = network::config()?;

    let interface = config.lookup(&iface)?;
@@ -142,7 +162,6 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
    Ok(data)
}

#[api(
    protected: true,
    input: {
@@ -256,7 +275,6 @@ pub fn create_interface(
    slaves: Option<String>,
    param: Value,
) -> Result<(), Error> {
    let interface_type = pbs_tools::json::required_string_param(&param, "type")?;
    let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
@@ -271,35 +289,55 @@ pub fn create_interface(
    let mut interface = Interface::new(iface.clone());
    interface.interface_type = interface_type;

    if let Some(autostart) = autostart {
        interface.autostart = autostart;
    }
    if method.is_some() {
        interface.method = method;
    }
    if method6.is_some() {
        interface.method6 = method6;
    }
    if mtu.is_some() {
        interface.mtu = mtu;
    }
    if comments.is_some() {
        interface.comments = comments;
    }
    if comments6.is_some() {
        interface.comments6 = comments6;
    }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        check_duplicate_gateway_v4(&config, &iface)?;
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        check_duplicate_gateway_v6(&config, &iface)?;
        interface.gateway6 = Some(gateway6);
    }
@@ -310,7 +348,9 @@ pub fn create_interface(
                let ports = split_interface_list(&ports)?;
                set_bridge_ports(&mut interface, ports)?;
            }
            if bridge_vlan_aware.is_some() {
                interface.bridge_vlan_aware = bridge_vlan_aware;
            }
        }
        NetworkInterfaceType::Bond => {
            if let Some(mode) = bond_mode {
@@ -322,9 +362,7 @@ pub fn create_interface(
                interface.bond_primary = bond_primary;
            }
            if bond_xmit_hash_policy.is_some() {
                if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
                    bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
                }
                interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
@@ -335,7 +373,10 @@ pub fn create_interface(
                set_bond_slaves(&mut interface, slaves)?;
            }
        }
        _ => bail!(
            "creating network interface type '{:?}' is not supported",
            interface_type
        ),
    }

    if interface.cidr.is_some() || interface.gateway.is_some() {
@@ -395,7 +436,6 @@ pub enum DeletableProperty {
    bond_xmit_hash_policy,
}

#[api(
    protected: true,
    input: {
@@ -523,7 +563,6 @@ pub fn update_interface(
    digest: Option<String>,
    param: Value,
) -> Result<(), Error> {
    let _lock = network::lock_config()?;

    let (mut config, expected_digest) = network::config()?;
@@ -533,49 +572,95 @@ pub fn update_interface(
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if gateway.is_some() {
        check_duplicate_gateway_v4(&config, &iface)?;
    }
    if gateway6.is_some() {
        check_duplicate_gateway_v6(&config, &iface)?;
    }

    let interface = config.lookup_mut(&iface)?;

    if let Some(interface_type) = param.get("type") {
        let interface_type = NetworkInterfaceType::deserialize(interface_type)?;
        if interface_type != interface.interface_type {
            bail!(
                "got unexpected interface type ({:?} != {:?})",
                interface_type,
                interface.interface_type
            );
        }
    }

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::cidr => {
                    interface.cidr = None;
                }
                DeletableProperty::cidr6 => {
                    interface.cidr6 = None;
                }
                DeletableProperty::gateway => {
                    interface.gateway = None;
                }
                DeletableProperty::gateway6 => {
                    interface.gateway6 = None;
                }
                DeletableProperty::method => {
                    interface.method = None;
                }
                DeletableProperty::method6 => {
                    interface.method6 = None;
                }
                DeletableProperty::comments => {
                    interface.comments = None;
                }
                DeletableProperty::comments6 => {
                    interface.comments6 = None;
                }
                DeletableProperty::mtu => {
                    interface.mtu = None;
                }
                DeletableProperty::autostart => {
                    interface.autostart = false;
                }
                DeletableProperty::bridge_ports => {
                    set_bridge_ports(interface, Vec::new())?;
                }
                DeletableProperty::bridge_vlan_aware => {
                    interface.bridge_vlan_aware = None;
                }
                DeletableProperty::slaves => {
                    set_bond_slaves(interface, Vec::new())?;
                }
                DeletableProperty::bond_primary => {
                    interface.bond_primary = None;
                }
                DeletableProperty::bond_xmit_hash_policy => interface.bond_xmit_hash_policy = None,
            }
        }
    }

    if let Some(autostart) = autostart {
        interface.autostart = autostart;
    }
    if method.is_some() {
        interface.method = method;
    }
    if method6.is_some() {
        interface.method6 = method6;
    }
    if mtu.is_some() {
        interface.mtu = mtu;
    }
    if let Some(ports) = bridge_ports {
        let ports = split_interface_list(&ports)?;
        set_bridge_ports(interface, ports)?;
    }
    if bridge_vlan_aware.is_some() {
        interface.bridge_vlan_aware = bridge_vlan_aware;
    }
    if let Some(slaves) = slaves {
        let slaves = split_interface_list(&slaves)?;
        set_bond_slaves(interface, slaves)?;
@@ -589,9 +674,7 @@ pub fn update_interface(
        interface.bond_primary = bond_primary;
    }
    if bond_xmit_hash_policy.is_some() {
        if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
            bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
        }
        interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
@@ -600,30 +683,42 @@ pub fn update_interface(
    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        interface.gateway6 = Some(gateway6);
    }

    if comments.is_some() {
        interface.comments = comments;
    }
    if comments6.is_some() {
        interface.comments6 = comments6;
    }

    if interface.cidr.is_some() || interface.gateway.is_some() {
        interface.method = Some(NetworkConfigMethod::Static);
@@ -696,21 +791,26 @@ pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Err
    },
)]
/// Reload network configuration (requires ifupdown2).
pub async fn reload_network_config(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    network::assert_ifupdown2_installed()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let upid_str = WorkerTask::spawn(
        "srvreload",
        Some(String::from("networking")),
        auth_id.to_string(),
        true,
        |_worker| async {
            let _ = std::fs::rename(
                network::NETWORK_INTERFACES_NEW_FILENAME,
                network::NETWORK_INTERFACES_FILENAME,
            );

            network::network_reload()?;
            Ok(())
        },
    )?;

    Ok(upid_str)
}
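The rename at the top of that worker is the commit half of a stage-then-rename update: edits accumulate in interfaces.new and are moved into place atomically right before the reload, so a failed or abandoned edit never clobbers the live file. In miniature (paths illustrative, not the crate's constants):

use std::fs;

fn stage_and_commit(contents: &str) -> std::io::Result<()> {
    let staged = "/etc/network/interfaces.new";
    let live = "/etc/network/interfaces";
    fs::write(staged, contents)?; // edits land in the staging file first
    fs::rename(staged, live) // rename(2) is atomic within one filesystem
}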
@@ -730,7 +830,6 @@ pub async fn reload_network_config(
    },
)]
/// Revert network configuration (rm /etc/network/interfaces.new).
pub fn revert_network_config() -> Result<(), Error> {
    let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);
    Ok(())
@@ -33,5 +33,4 @@ fn get_report(
    Ok(json!(generate_report()))
}

pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_REPORT);
@@ -1,13 +1,11 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};
use std::collections::BTreeMap;

use proxmox_router::{Permission, Router};
use proxmox_schema::api;

use pbs_api_types::{RRDMode, RRDTimeFrame, NODE_SCHEMA, PRIV_SYS_AUDIT};

use crate::rrd_cache::extract_rrd_data;
@@ -17,7 +15,6 @@ pub fn create_value_from_rrd(
    timeframe: RRDTimeFrame,
    mode: RRDMode,
) -> Result<Value, Error> {
    let mut result: Vec<Value> = Vec::new();

    let mut timemap = BTreeMap::new();
@@ -30,9 +27,13 @@ pub fn create_value_from_rrd(
            None => continue,
        };

        if let Some(expected_resolution) = last_resolution {
            if reso != expected_resolution {
                bail!(
                    "got unexpected RRD resolution ({} != {})",
                    reso,
                    expected_resolution
                );
            }
        } else {
            last_resolution = Some(reso);
@@ -75,29 +76,30 @@ pub fn create_value_from_rrd(
    },
)]
/// Read node stats
fn get_node_stats(timeframe: RRDTimeFrame, cf: RRDMode, _param: Value) -> Result<Value, Error> {
    create_value_from_rrd(
        "host",
        &[
            "cpu",
            "iowait",
            "memtotal",
            "memused",
            "swaptotal",
            "swapused",
            "netin",
            "netout",
            "loadavg",
            "total",
            "used",
            "read_ios",
            "read_bytes",
            "write_ios",
            "write_bytes",
            "io_ticks",
        ],
        timeframe,
        cf,
    )
}

pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_NODE_STATS);
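create_value_from_rrd pivots column-oriented RRD series into time-keyed rows; a BTreeMap keeps the rows sorted by timestamp as they accumulate. The merge step in isolation, under the assumption that every series shares one start time and resolution:

use std::collections::BTreeMap;

fn merge_series(
    start: u64,
    resolution: u64,
    series: &[(&str, Vec<Option<f64>>)],
) -> BTreeMap<u64, Vec<(String, f64)>> {
    let mut timemap: BTreeMap<u64, Vec<(String, f64)>> = BTreeMap::new();
    for (name, values) in series {
        let mut t = start;
        for value in values {
            // gaps in the RRD stay gaps: None samples are simply skipped
            if let Some(v) = *value {
                timemap.entry(t).or_default().push((name.to_string(), v));
            }
            t += resolution;
        }
    }
    timemap
}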
@@ -3,11 +3,11 @@ use std::process::{Command, Stdio};
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
use proxmox_sys::sortable;

use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SERVICE_ID_SCHEMA};

use proxmox_rest_server::WorkerTask;
@@ -22,7 +22,6 @@ static SERVICE_NAME_LIST: [&str; 7] = [
];

pub fn real_service_name(service: &str) -> &str {
    // since postfix package 3.1.0-3.1 the postfix unit is only here
    // to manage subinstances, of which the default is called "-".
    // This is where we look for the daemon status
@@ -35,7 +34,6 @@ pub fn real_service_name(service: &str) -> &str {
}

fn get_full_service_state(service: &str) -> Result<Value, Error> {
    let real_service_name = real_service_name(service);

    let mut child = Command::new("systemctl")
@@ -43,7 +41,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
        .stdout(Stdio::piped())
        .spawn()?;

    use std::io::{BufRead, BufReader};

    let mut result = json!({});
@@ -76,7 +74,6 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
}

fn json_service_state(service: &str, status: Value) -> Value {
    if let Some(desc) = status["Description"].as_str() {
        let name = status["Name"].as_str().unwrap_or(service);
        let state = status["SubState"].as_str().unwrap_or("unknown");
@@ -128,10 +125,7 @@ fn json_service_state(service: &str, status: Value) -> Value {
    },
)]
/// Service list.
fn list_services(_param: Value) -> Result<Value, Error> {
    let mut list = vec![];

    for service in &SERVICE_NAME_LIST {
@@ -165,11 +159,7 @@ fn list_services(
    },
)]
/// Read service properties.
fn get_service_state(service: String, _param: Value) -> Result<Value, Error> {
    let service = service.as_str();

    if !SERVICE_NAME_LIST.contains(&service) {
@@ -182,11 +172,10 @@ fn get_service_state(
}

fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
    let workerid = format!("srv{}", &cmd);

    let cmd = match cmd {
        "start" | "stop" | "restart" => cmd.to_string(),
        "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
        _ => bail!("unknown service command '{}'", cmd),
    };
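The match above doubles as a whitelist: only the four known commands ever reach systemctl, and 'reload' is quietly mapped to try-reload-or-restart for units that have no reload action. The same logic as a standalone function:

fn systemctl_verb(cmd: &str) -> Result<&'static str, anyhow::Error> {
    Ok(match cmd {
        "start" => "start",
        "stop" => "stop",
        "restart" => "restart",
        // some services do not implement reload
        "reload" => "try-reload-or-restart",
        _ => anyhow::bail!("unknown service command '{}'", cmd),
    })
}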
@@ -198,9 +187,12 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu
        auth_id.to_string(),
        false,
        move |_worker| {
            if service == "proxmox-backup" && cmd == "stop" {
                bail!(
                    "invalid service cmd '{} {}' cannot stop essential service!",
                    service,
                    cmd
                );
            }

            let real_service_name = real_service_name(&service);
@@ -214,7 +206,7 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu
            }

            Ok(())
        },
    )?;

    Ok(upid.into())
@@ -242,7 +234,6 @@ fn start_service(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("starting service {}", service);
@@ -271,8 +262,7 @@ fn stop_service(
    service: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("stopping service {}", service);
@@ -302,7 +292,6 @@ fn restart_service(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("re-starting service {}", service);
@@ -337,7 +326,6 @@ fn reload_service(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    log::info!("reloading service {}", service);
@@ -347,26 +335,11 @@ fn reload_service(
#[sortable]
const SERVICE_SUBDIRS: SubdirMap = &sorted!([
    ("reload", &Router::new().post(&API_METHOD_RELOAD_SERVICE)),
    ("restart", &Router::new().post(&API_METHOD_RESTART_SERVICE)),
    ("start", &Router::new().post(&API_METHOD_START_SERVICE)),
    ("state", &Router::new().get(&API_METHOD_GET_SERVICE_STATE)),
    ("stop", &Router::new().post(&API_METHOD_STOP_SERVICE)),
]);
const SERVICE_ROUTER: Router = Router::new() const SERVICE_ROUTER: Router = Router::new()
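The sorted! requirement on these subdir tables is not cosmetic: a statically sorted slice lets a dispatcher resolve a path component by binary search instead of a linear scan (presumably what proxmox-router does internally; this standalone model is only an illustration):

// hypothetical routing table; the second tuple field stands in for a handler
const SUBDIRS: &[(&str, &str)] = &[
    ("reload", "POST"),
    ("restart", "POST"),
    ("start", "POST"),
    ("state", "GET"),
    ("stop", "POST"),
];

fn lookup(name: &str) -> Option<&'static str> {
    SUBDIRS
        .binary_search_by_key(&name, |(n, _)| n)
        .ok()
        .map(|idx| SUBDIRS[idx].1)
}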
View File
@ -1,18 +1,18 @@
use std::process::Command;
use std::path::Path; use std::path::Path;
use std::process::Command;
use anyhow::{Error, format_err, bail}; use anyhow::{bail, format_err, Error};
use serde_json::Value; use serde_json::Value;
use proxmox_sys::linux::procfs; use proxmox_sys::linux::procfs;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission}; use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT}; use pbs_api_types::{NodePowerCommand, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
use crate::api2::types::{ use crate::api2::types::{
NodeCpuInformation, NodeStatus, NodeMemoryCounters, NodeSwapCounters, NodeInformation, NodeCpuInformation, NodeInformation, NodeMemoryCounters, NodeStatus, NodeSwapCounters,
}; };
impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation { impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
@ -111,7 +111,6 @@ fn get_status(
)] )]
/// Reboot or shutdown the node. /// Reboot or shutdown the node.
fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> { fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
let systemctl_command = match command { let systemctl_command = match command {
NodePowerCommand::Reboot => "reboot", NodePowerCommand::Reboot => "reboot",
NodePowerCommand::Shutdown => "poweroff", NodePowerCommand::Shutdown => "poweroff",
@ -126,7 +125,13 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
match output.status.code() { match output.status.code() {
Some(code) => { Some(code) => {
let msg = String::from_utf8(output.stderr) let msg = String::from_utf8(output.stderr)
.map(|m| if m.is_empty() { String::from("no error message") } else { m }) .map(|m| {
if m.is_empty() {
String::from("no error message")
} else {
m
}
})
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)")); .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
bail!("systemctl failed with status code: {} - {}", code, msg); bail!("systemctl failed with status code: {} - {}", code, msg);
} }
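The stderr handling above covers two edge cases: empty output and non-UTF-8 output. Pulled out as a standalone helper for illustration (a sketch, not part of this commit):

fn stderr_to_message(stderr: Vec<u8>) -> String {
    String::from_utf8(stderr)
        .map(|m| {
            if m.is_empty() {
                String::from("no error message")
            } else {
                m
            }
        })
        .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"))
}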
View File
@ -1,16 +1,15 @@
use anyhow::{Error, format_err, bail}; use anyhow::{bail, format_err, Error};
use serde_json::Value; use serde_json::Value;
use proxmox_router::{Router, RpcEnvironment, Permission}; use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use pbs_api_types::{ use pbs_api_types::{
NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid, Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SUBSCRIPTION_KEY_SCHEMA,
PRIV_SYS_AUDIT,PRIV_SYS_MODIFY,
}; };
use crate::tools; use crate::tools;
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo}; use crate::tools::subscription::{self, SubscriptionInfo, SubscriptionStatus};
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
#[api( #[api(
@ -33,9 +32,7 @@ use pbs_config::CachedUserInfo;
}, },
)] )]
/// Check and update subscription status. /// Check and update subscription status.
pub fn check_subscription( pub fn check_subscription(force: bool) -> Result<(), Error> {
force: bool,
) -> Result<(), Error> {
let info = match subscription::read_subscription() { let info = match subscription::read_subscription() {
Err(err) => bail!("could not read subscription status: {}", err), Err(err) => bail!("could not read subscription status: {}", err),
Ok(Some(info)) => info, Ok(Some(info)) => info,
@ -93,7 +90,7 @@ pub fn get_subscription(
status: SubscriptionStatus::NOTFOUND, status: SubscriptionStatus::NOTFOUND,
message: Some("There is no subscription key".into()), message: Some("There is no subscription key".into()),
serverid: Some(tools::get_hardware_address()?), serverid: Some(tools::get_hardware_address()?),
url: Some(url.into()), url: Some(url.into()),
..Default::default() ..Default::default()
}, },
}; };
@ -132,10 +129,7 @@ pub fn get_subscription(
}, },
)] )]
/// Set a subscription key and check it. /// Set a subscription key and check it.
pub fn set_subscription( pub fn set_subscription(key: String) -> Result<(), Error> {
key: String,
) -> Result<(), Error> {
let server_id = tools::get_hardware_address()?; let server_id = tools::get_hardware_address()?;
let info = subscription::check_subscription(key, server_id)?; let info = subscription::check_subscription(key, server_id)?;
@ -161,7 +155,6 @@ pub fn set_subscription(
)] )]
/// Delete subscription info. /// Delete subscription info.
pub fn delete_subscription() -> Result<(), Error> { pub fn delete_subscription() -> Result<(), Error> {
subscription::delete_subscription() subscription::delete_subscription()
.map_err(|err| format_err!("Deleting subscription failed: {}", err))?; .map_err(|err| format_err!("Deleting subscription failed: {}", err))?;
View File
@ -1,12 +1,12 @@
use std::process::{Command, Stdio}; use std::process::{Command, Stdio};
use anyhow::{Error}; use anyhow::Error;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission}; use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use pbs_api_types::{NODE_SCHEMA, SYSTEMD_DATETIME_FORMAT, PRIV_SYS_AUDIT}; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT, SYSTEMD_DATETIME_FORMAT};
fn dump_journal( fn dump_journal(
start: Option<u64>, start: Option<u64>,
@ -15,12 +15,17 @@ fn dump_journal(
until: Option<&str>, until: Option<&str>,
service: Option<&str>, service: Option<&str>,
) -> Result<(u64, Vec<Value>), Error> { ) -> Result<(u64, Vec<Value>), Error> {
let mut args = vec!["-o", "short", "--no-pager"]; let mut args = vec!["-o", "short", "--no-pager"];
if let Some(service) = service { args.extend(&["--unit", service]); } if let Some(service) = service {
if let Some(since) = since { args.extend(&["--since", since]); } args.extend(&["--unit", service]);
if let Some(until) = until { args.extend(&["--until", until]); } }
if let Some(since) = since {
args.extend(&["--since", since]);
}
if let Some(until) = until {
args.extend(&["--until", until]);
}
let mut lines: Vec<Value> = vec![]; let mut lines: Vec<Value> = vec![];
let mut limit = limit.unwrap_or(50); let mut limit = limit.unwrap_or(50);
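The if-let chain above only grows the argument vector for filters the caller actually requested. As a standalone sketch of that assembly, assuming journalctl's --unit/--since/--until flags:

fn journal_args<'a>(
    service: Option<&'a str>,
    since: Option<&'a str>,
    until: Option<&'a str>,
) -> Vec<&'a str> {
    let mut args = vec!["-o", "short", "--no-pager"];
    if let Some(service) = service {
        args.extend(["--unit", service]);
    }
    if let Some(since) = since {
        args.extend(["--since", since]);
    }
    if let Some(until) = until {
        args.extend(["--until", until]);
    }
    args
}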
@ -32,15 +37,19 @@ fn dump_journal(
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.spawn()?; .spawn()?;
use std::io::{BufRead,BufReader}; use std::io::{BufRead, BufReader};
if let Some(ref mut stdout) = child.stdout { if let Some(ref mut stdout) = child.stdout {
for line in BufReader::new(stdout).lines() { for line in BufReader::new(stdout).lines() {
match line { match line {
Ok(line) => { Ok(line) => {
count += 1; count += 1;
if count < start { continue }; if count < start {
if limit == 0 { continue }; continue;
};
if limit == 0 {
continue;
};
lines.push(json!({ "n": count, "t": line })); lines.push(json!({ "n": count, "t": line }));
@ -64,7 +73,7 @@ fn dump_journal(
// so we add a line // so we add a line
if count == 0 { if count == 0 {
count += 1; count += 1;
lines.push(json!({ "n": count, "t": "no content"})); lines.push(json!({ "n": count, "t": "no content"}));
} }
Ok((count, lines)) Ok((count, lines))
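The count/start/limit bookkeeping above keeps counting past the requested window so callers still learn the total line count. A minimal sketch of that windowing, assuming limit is decremented once per emitted line (the decrement sits outside the visible hunk):

fn window<I: Iterator<Item = String>>(
    lines: I,
    start: u64,
    mut limit: u64,
) -> (u64, Vec<(u64, String)>) {
    let mut count = 0;
    let mut out = Vec::new();
    for line in lines {
        count += 1;
        if count < start || limit == 0 {
            continue; // keep counting for the total, emit nothing
        }
        limit -= 1;
        out.push((count, line));
    }
    (count, out)
}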
@ -133,21 +142,21 @@ fn get_syslog(
_info: &ApiMethod, _info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment, mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let service = param["service"]
let service = param["service"].as_str().map(|service| crate::api2::node::services::real_service_name(service)); .as_str()
.map(|service| crate::api2::node::services::real_service_name(service));
let (count, lines) = dump_journal( let (count, lines) = dump_journal(
param["start"].as_u64(), param["start"].as_u64(),
param["limit"].as_u64(), param["limit"].as_u64(),
param["since"].as_str(), param["since"].as_str(),
param["until"].as_str(), param["until"].as_str(),
service)?; service,
)?;
rpcenv["total"] = Value::from(count); rpcenv["total"] = Value::from(count);
Ok(json!(lines)) Ok(json!(lines))
} }
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_SYSLOG);
.get(&API_METHOD_GET_SYSLOG);
View File
@ -4,21 +4,20 @@ use std::io::{BufRead, BufReader};
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_sys::sortable; use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_router::{list_subdirs_api_method, Router, RpcEnvironment, Permission, SubdirMap};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_sys::sortable;
use pbs_api_types::{ use pbs_api_types::{
Userid, Authid, Tokenname, TaskListItem, TaskStateType, UPID, Authid, TaskListItem, TaskStateType, Tokenname, Userid, DATASTORE_SCHEMA, NODE_SCHEMA,
NODE_SCHEMA, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
SYNC_JOB_WORKER_ID_REGEX, DATASTORE_SCHEMA,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_VERIFY, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_VERIFY, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
SYNC_JOB_WORKER_ID_REGEX, UPID, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
}; };
use crate::api2::pull::check_pull_privs; use crate::api2::pull::check_pull_privs;
use proxmox_rest_server::{upid_log_path, upid_read_status, TaskState, TaskListInfoIterator};
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use proxmox_rest_server::{upid_log_path, upid_read_status, TaskListInfoIterator, TaskState};
// matches respective job execution privileges // matches respective job execution privileges
fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> { fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
@ -26,13 +25,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
("verificationjob", Some(workerid)) => { ("verificationjob", Some(workerid)) => {
if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) { if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
if let Some(store) = captures.get(1) { if let Some(store) = captures.get(1) {
return user_info.check_privs(auth_id, return user_info.check_privs(
&["datastore", store.as_str()], auth_id,
PRIV_DATASTORE_VERIFY, &["datastore", store.as_str()],
true); PRIV_DATASTORE_VERIFY,
true,
);
} }
} }
}, }
("syncjob", Some(workerid)) => { ("syncjob", Some(workerid)) => {
if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) { if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
let remote = captures.get(1); let remote = captures.get(1);
@ -40,29 +41,34 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
let local_store = captures.get(3); let local_store = captures.get(3);
if let (Some(remote), Some(remote_store), Some(local_store)) = if let (Some(remote), Some(remote_store), Some(local_store)) =
(remote, remote_store, local_store) { (remote, remote_store, local_store)
{
return check_pull_privs(auth_id, return check_pull_privs(
local_store.as_str(), auth_id,
remote.as_str(), local_store.as_str(),
remote_store.as_str(), remote.as_str(),
false); remote_store.as_str(),
false,
);
} }
} }
}, }
("garbage_collection", Some(workerid)) => { ("garbage_collection", Some(workerid)) => {
return user_info.check_privs(auth_id, return user_info.check_privs(
&["datastore", workerid], auth_id,
PRIV_DATASTORE_MODIFY, &["datastore", workerid],
true) PRIV_DATASTORE_MODIFY,
}, true,
)
}
("prune", Some(workerid)) => { ("prune", Some(workerid)) => {
return user_info.check_privs(auth_id, return user_info.check_privs(
&["datastore", auth_id,
workerid], &["datastore", workerid],
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_MODIFY,
true); true,
}, );
}
_ => bail!("not a scheduled job task"), _ => bail!("not a scheduled job task"),
}; };
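Each match arm above recovers the affected datastore (or remote) from the worker id through a regex capture group. A hedged standalone sketch of that extraction; the concrete pattern here is an assumption, the real VERIFICATION_JOB_WORKER_ID_REGEX lives in pbs_api_types:

use regex::Regex;

fn verification_store(workerid: &str) -> Option<String> {
    // assumed shape "<store>:...": capture everything before the first colon
    let re = Regex::new(r"^([^:]+):").ok()?;
    re.captures(workerid)
        .and_then(|caps| caps.get(1))
        .map(|m| m.as_str().to_string())
}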
@ -102,7 +108,8 @@ fn check_job_store(upid: &UPID, store: &str) -> bool {
fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> { fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
let task_auth_id: Authid = upid.auth_id.parse()?; let task_auth_id: Authid = upid.auth_id.parse()?;
if auth_id == &task_auth_id if auth_id == &task_auth_id
|| (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) { || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id)
{
// task owner can always read // task owner can always read
Ok(()) Ok(())
} else { } else {
@ -111,7 +118,8 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
// access to all tasks // access to all tasks
// or task == job which the user/token could have configured/manually executed // or task == job which the user/token could have configured/manually executed
user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false) user_info
.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
.or_else(|_| check_job_privs(auth_id, &user_info, upid)) .or_else(|_| check_job_privs(auth_id, &user_info, upid))
.or_else(|_| bail!("task access not allowed")) .or_else(|_| bail!("task access not allowed"))
} }
@ -127,9 +135,10 @@ pub fn tasktype(state: &TaskState) -> TaskStateType {
} }
fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types::TaskListItem { fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types::TaskListItem {
let (endtime, status) = info let (endtime, status) = info.state.map_or_else(
.state || (None, None),
.map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string()))); |a| (Some(a.endtime()), Some(a.to_string())),
);
pbs_api_types::TaskListItem { pbs_api_types::TaskListItem {
upid: info.upid_str, upid: info.upid_str,
@ -210,11 +219,7 @@ fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types
}, },
)] )]
/// Get task status. /// Get task status.
async fn get_task_status( async fn get_task_status(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?; let upid = extract_upid(&param)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -249,7 +254,6 @@ async fn get_task_status(
} }
fn extract_upid(param: &Value) -> Result<UPID, Error> { fn extract_upid(param: &Value) -> Result<UPID, Error> {
let upid_str = pbs_tools::json::required_string_param(param, "upid")?; let upid_str = pbs_tools::json::required_string_param(param, "upid")?;
upid_str.parse::<UPID>() upid_str.parse::<UPID>()
@ -289,11 +293,7 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
}, },
)] )]
/// Read task log. /// Read task log.
async fn read_task_log( async fn read_task_log(param: Value, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?; let upid = extract_upid(&param)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -317,8 +317,12 @@ async fn read_task_log(
match line { match line {
Ok(line) => { Ok(line) => {
count += 1; count += 1;
if count < start { continue }; if count < start {
if limit == 0 { continue }; continue;
};
if limit == 0 {
continue;
};
lines.push(json!({ "n": count, "t": line })); lines.push(json!({ "n": count, "t": line }));
@ -359,11 +363,7 @@ async fn read_task_log(
}, },
)] )]
/// Try to stop a task. /// Try to stop a task.
fn stop_task( fn stop_task(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let upid = extract_upid(&param)?; let upid = extract_upid(&param)?;
let auth_id = rpcenv.get_auth_id().unwrap(); let auth_id = rpcenv.get_auth_id().unwrap();
@ -465,7 +465,6 @@ pub fn list_tasks(
param: Value, param: Value,
mut rpcenv: &mut dyn RpcEnvironment, mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> { ) -> Result<Vec<TaskListItem>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]); let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
@ -475,7 +474,11 @@ pub fn list_tasks(
let store = param["store"].as_str(); let store = param["store"].as_str();
let list = TaskListInfoIterator::new(running)?; let list = TaskListInfoIterator::new(running)?;
let limit = if limit > 0 { limit as usize } else { usize::MAX }; let limit = if limit > 0 {
limit as usize
} else {
usize::MAX
};
let mut skipped = 0; let mut skipped = 0;
let mut result: Vec<TaskListItem> = Vec::new(); let mut result: Vec<TaskListItem> = Vec::new();
@ -510,15 +513,21 @@ pub fn list_tasks(
} }
if let Some(needle) = &userfilter { if let Some(needle) = &userfilter {
if !info.upid.auth_id.to_string().contains(needle) { continue; } if !info.upid.auth_id.to_string().contains(needle) {
continue;
}
} }
if let Some(store) = store { if let Some(store) = store {
if !check_job_store(&info.upid, store) { continue; } if !check_job_store(&info.upid, store) {
continue;
}
} }
if let Some(typefilter) = &typefilter { if let Some(typefilter) = &typefilter {
if !info.upid.worker_type.contains(typefilter) { continue; } if !info.upid.worker_type.contains(typefilter) {
continue;
}
} }
match (&info.state, &statusfilter) { match (&info.state, &statusfilter) {
@ -528,9 +537,9 @@ pub fn list_tasks(
if !filters.contains(&tasktype(state)) { if !filters.contains(&tasktype(state)) {
continue; continue;
} }
}, }
(None, Some(_)) => continue, (None, Some(_)) => continue,
_ => {}, _ => {}
} }
if skipped < start as usize { if skipped < start as usize {
@ -546,7 +555,8 @@ pub fn list_tasks(
} }
let mut count = result.len() + start as usize; let mut count = result.len() + start as usize;
if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new entries if !result.is_empty() && result.len() >= limit {
// we have a 'virtual' entry as long as we have any new entries
count += 1; count += 1;
} }
@ -557,14 +567,8 @@ pub fn list_tasks(
#[sortable] #[sortable]
const UPID_API_SUBDIRS: SubdirMap = &sorted!([ const UPID_API_SUBDIRS: SubdirMap = &sorted!([
( ("log", &Router::new().get(&API_METHOD_READ_TASK_LOG)),
"log", &Router::new() ("status", &Router::new().get(&API_METHOD_GET_TASK_STATUS))
.get(&API_METHOD_READ_TASK_LOG)
),
(
"status", &Router::new()
.get(&API_METHOD_GET_TASK_STATUS)
)
]); ]);
pub const UPID_API_ROUTER: Router = Router::new() pub const UPID_API_ROUTER: Router = Router::new()
View File
@ -1,11 +1,11 @@
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions}; use proxmox_router::{Permission, Router};
use proxmox_router::{Router, Permission};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
use pbs_api_types::{NODE_SCHEMA, TIME_ZONE_SCHEMA, PRIV_SYS_MODIFY}; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY, TIME_ZONE_SCHEMA};
fn read_etc_localtime() -> Result<String, Error> { fn read_etc_localtime() -> Result<String, Error> {
// use /etc/timezone // use /etc/timezone
@ -14,8 +14,8 @@ fn read_etc_localtime() -> Result<String, Error> {
} }
// otherwise guess from the /etc/localtime symlink // otherwise guess from the /etc/localtime symlink
let link = std::fs::read_link("/etc/localtime"). let link = std::fs::read_link("/etc/localtime")
map_err(|err| format_err!("failed to guess timezone - {}", err))?; .map_err(|err| format_err!("failed to guess timezone - {}", err))?;
let link = link.to_string_lossy(); let link = link.to_string_lossy();
match link.rfind("/zoneinfo/") { match link.rfind("/zoneinfo/") {
@ -87,17 +87,19 @@ fn get_time(_param: Value) -> Result<Value, Error> {
}, },
)] )]
/// Set time zone /// Set time zone
fn set_timezone( fn set_timezone(timezone: String, _param: Value) -> Result<Value, Error> {
timezone: String,
_param: Value,
) -> Result<Value, Error> {
let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone)); let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));
if !path.exists() { if !path.exists() {
bail!("No such timezone."); bail!("No such timezone.");
} }
replace_file("/etc/timezone", timezone.as_bytes(), CreateOptions::new(), true)?; replace_file(
"/etc/timezone",
timezone.as_bytes(),
CreateOptions::new(),
true,
)?;
let _ = std::fs::remove_file("/etc/localtime"); let _ = std::fs::remove_file("/etc/localtime");
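For context, set_timezone validates the zone against /usr/share/zoneinfo, rewrites /etc/timezone, and then recreates /etc/localtime. A hedged sketch of the whole sequence; the final symlink step is assumed, since the hunk ends before it:

use std::path::PathBuf;

fn set_tz(timezone: &str) -> std::io::Result<()> {
    let zoneinfo = PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));
    if !zoneinfo.exists() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "no such timezone",
        ));
    }
    std::fs::write("/etc/timezone", format!("{}\n", timezone))?;
    let _ = std::fs::remove_file("/etc/localtime");
    std::os::unix::fs::symlink(&zoneinfo, "/etc/localtime")?; // assumed re-link
    Ok(())
}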
View File
@ -1,9 +1,9 @@
//! Cheap check if the API daemon is online. //! Cheap check if the API daemon is online.
use anyhow::{Error}; use anyhow::Error;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{Router, Permission}; use proxmox_router::{Permission, Router};
use proxmox_schema::api; use proxmox_schema::api;
#[api( #[api(
@ -28,5 +28,4 @@ pub fn ping() -> Result<Value, Error> {
"pong": true, "pong": true,
})) }))
} }
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().get(&API_METHOD_PING);
.get(&API_METHOD_PING);
View File
@ -2,23 +2,22 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use futures::{select, future::FutureExt}; use futures::{future::FutureExt, select};
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_sys::task_log; use proxmox_sys::task_log;
use pbs_api_types::{ use pbs_api_types::{
Authid, SyncJobConfig, GroupFilter, RateLimitConfig, GROUP_FILTER_LIST_SCHEMA, Authid, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, GROUP_FILTER_LIST_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
}; };
use proxmox_rest_server::WorkerTask;
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;
use crate::server::pull::{PullParameters, pull_store};
use crate::server::jobstate::Job; use crate::server::jobstate::Job;
use crate::server::pull::{pull_store, PullParameters};
pub fn check_pull_privs( pub fn check_pull_privs(
auth_id: &Authid, auth_id: &Authid,
@ -27,11 +26,15 @@ pub fn check_pull_privs(
remote_store: &str, remote_store: &str,
delete: bool, delete: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?; user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?; user_info.check_privs(
auth_id,
&["remote", remote, remote_store],
PRIV_REMOTE_READ,
false,
)?;
if delete { if delete {
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?; user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
@ -48,7 +51,11 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
&sync_job.store, &sync_job.store,
&sync_job.remote, &sync_job.remote,
&sync_job.remote_store, &sync_job.remote_store,
sync_job.owner.as_ref().unwrap_or_else(|| Authid::root_auth_id()).clone(), sync_job
.owner
.as_ref()
.unwrap_or_else(|| Authid::root_auth_id())
.clone(),
sync_job.remove_vanished, sync_job.remove_vanished,
sync_job.group_filter.clone(), sync_job.group_filter.clone(),
sync_job.limit.clone(), sync_job.limit.clone(),
@ -63,12 +70,13 @@ pub fn do_sync_job(
schedule: Option<String>, schedule: Option<String>,
to_stdout: bool, to_stdout: bool,
) -> Result<String, Error> { ) -> Result<String, Error> {
let job_id = format!(
let job_id = format!("{}:{}:{}:{}", "{}:{}:{}:{}",
sync_job.remote, sync_job.remote,
sync_job.remote_store, sync_job.remote_store,
sync_job.store, sync_job.store,
job.jobname()); job.jobname()
);
let worker_type = job.jobtype().to_string(); let worker_type = job.jobtype().to_string();
let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store); let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
@ -79,14 +87,12 @@ pub fn do_sync_job(
auth_id.to_string(), auth_id.to_string(),
to_stdout, to_stdout,
move |worker| async move { move |worker| async move {
job.start(&worker.upid().to_string())?; job.start(&worker.upid().to_string())?;
let worker2 = worker.clone(); let worker2 = worker.clone();
let sync_job2 = sync_job.clone(); let sync_job2 = sync_job.clone();
let worker_future = async move { let worker_future = async move {
let pull_params = PullParameters::try_from(&sync_job)?; let pull_params = PullParameters::try_from(&sync_job)?;
let client = pull_params.client().await?; let client = pull_params.client().await?;
@ -109,9 +115,11 @@ pub fn do_sync_job(
Ok(()) Ok(())
}; };
let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted"))); let mut abort_future = worker2
.abort_future()
.map(|_| Err(format_err!("sync aborted")));
let result = select!{ let result = select! {
worker = worker_future.fuse() => worker, worker = worker_future.fuse() => worker,
abort = abort_future => abort, abort = abort_future => abort,
}; };
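The select! above races the sync future against the worker's abort future, so whichever resolves first decides the result. A self-contained sketch of the same race, assuming only the futures crate:

use futures::{future::FutureExt, pin_mut, select};

async fn run_or_abort() -> Result<(), String> {
    let work = async { Ok::<(), String>(()) }.fuse();
    let abort = async { Err::<(), String>(String::from("sync aborted")) }.fuse();
    pin_mut!(work, abort);
    select! {
        res = work => res,
        res = abort => res,
    }
}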
@ -119,20 +127,23 @@ pub fn do_sync_job(
let status = worker2.create_state(&result); let status = worker2.create_state(&result);
match job.finish(status) { match job.finish(status) {
Ok(_) => {}, Ok(_) => {}
Err(err) => { Err(err) => {
eprintln!("could not finish job state: {}", err); eprintln!("could not finish job state: {}", err);
} }
} }
if let Some(email) = email { if let Some(email) = email {
if let Err(err) = crate::server::send_sync_status(&email, notify, &sync_job2, &result) { if let Err(err) =
crate::server::send_sync_status(&email, notify, &sync_job2, &result)
{
eprintln!("send sync notification failed: {}", err); eprintln!("send sync notification failed: {}", err);
} }
} }
result result
})?; },
)?;
Ok(upid_str) Ok(upid_str)
} }
@ -173,7 +184,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
}, },
)] )]
/// Sync store from other repository /// Sync store from other repository
async fn pull ( async fn pull(
store: String, store: String,
remote: String, remote: String,
remote_store: String, remote_store: String,
@ -183,7 +194,6 @@ async fn pull (
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(false); let delete = remove_vanished.unwrap_or(false);
@ -201,25 +211,29 @@ async fn pull (
let client = pull_params.client().await?; let client = pull_params.client().await?;
// fixme: set to_stdout to false? // fixme: set to_stdout to false?
let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.to_string(), true, move |worker| async move { let upid_str = WorkerTask::spawn(
"sync",
Some(store.clone()),
auth_id.to_string(),
true,
move |worker| async move {
task_log!(worker, "sync datastore '{}' start", store);
task_log!(worker, "sync datastore '{}' start", store); let pull_future = pull_store(&worker, &client, &pull_params);
let future = select! {
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
};
let pull_future = pull_store(&worker, &client, &pull_params); let _ = future?;
let future = select!{
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
};
let _ = future?; task_log!(worker, "sync datastore '{}' end", store);
task_log!(worker, "sync datastore '{}' end", store); Ok(())
},
Ok(()) )?;
})?;
Ok(upid_str) Ok(upid_str)
} }
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().post(&API_METHOD_PULL);
.post(&API_METHOD_PULL);
View File
@ -1,13 +1,13 @@
use std::sync::{Arc,RwLock};
use std::collections::HashSet; use std::collections::HashSet;
use std::sync::{Arc, RwLock};
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{RpcEnvironment, RpcEnvironmentType}; use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
use pbs_api_types::Authid;
use pbs_datastore::backup_info::BackupDir; use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::DataStore; use pbs_datastore::DataStore;
use pbs_api_types::Authid;
use proxmox_rest_server::formatter::*; use proxmox_rest_server::formatter::*;
use proxmox_rest_server::WorkerTask; use proxmox_rest_server::WorkerTask;
@ -22,7 +22,7 @@ pub struct ReaderEnvironment {
pub worker: Arc<WorkerTask>, pub worker: Arc<WorkerTask>,
pub datastore: Arc<DataStore>, pub datastore: Arc<DataStore>,
pub backup_dir: BackupDir, pub backup_dir: BackupDir,
allowed_chunks: Arc<RwLock<HashSet<[u8;32]>>>, allowed_chunks: Arc<RwLock<HashSet<[u8; 32]>>>,
} }
impl ReaderEnvironment { impl ReaderEnvironment {
@ -33,8 +33,6 @@ impl ReaderEnvironment {
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
backup_dir: BackupDir, backup_dir: BackupDir,
) -> Self { ) -> Self {
Self { Self {
result_attributes: json!({}), result_attributes: json!({}),
env_type, env_type,
@ -53,22 +51,22 @@ impl ReaderEnvironment {
} }
pub fn debug<S: AsRef<str>>(&self, msg: S) { pub fn debug<S: AsRef<str>>(&self, msg: S) {
if self.debug { self.worker.log_message(msg); } if self.debug {
self.worker.log_message(msg);
}
} }
pub fn register_chunk(&self, digest: [u8; 32]) {
pub fn register_chunk(&self, digest: [u8;32]) {
let mut allowed_chunks = self.allowed_chunks.write().unwrap(); let mut allowed_chunks = self.allowed_chunks.write().unwrap();
allowed_chunks.insert(digest); allowed_chunks.insert(digest);
} }
pub fn check_chunk_access(&self, digest: [u8;32]) -> bool { pub fn check_chunk_access(&self, digest: [u8; 32]) -> bool {
self.allowed_chunks.read().unwrap().contains(&digest) self.allowed_chunks.read().unwrap().contains(&digest)
} }
} }
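The allowed-chunk registry used above is a shared hash set of digests behind a read-write lock: the reader registers every chunk of an opened index, and chunk downloads are checked against that set. A standalone model:

use std::collections::HashSet;
use std::sync::{Arc, RwLock};

#[derive(Clone, Default)]
struct AllowedChunks(Arc<RwLock<HashSet<[u8; 32]>>>);

impl AllowedChunks {
    fn register(&self, digest: [u8; 32]) {
        self.0.write().unwrap().insert(digest);
    }
    fn check(&self, digest: [u8; 32]) -> bool {
        self.0.read().unwrap().contains(&digest)
    }
}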
impl RpcEnvironment for ReaderEnvironment { impl RpcEnvironment for ReaderEnvironment {
fn result_attrib_mut(&mut self) -> &mut Value { fn result_attrib_mut(&mut self) -> &mut Value {
&mut self.result_attributes &mut self.result_attributes
} }
View File
@ -2,58 +2,66 @@
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hex::FromHex;
use hyper::header::{self, HeaderValue, UPGRADE}; use hyper::header::{self, HeaderValue, UPGRADE};
use hyper::http::request::Parts; use hyper::http::request::Parts;
use hyper::{Body, Response, Request, StatusCode}; use hyper::{Body, Request, Response, StatusCode};
use serde_json::Value; use serde_json::Value;
use hex::FromHex;
use proxmox_sys::sortable;
use proxmox_router::{ use proxmox_router::{
http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission, http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
Router, RpcEnvironment, SubdirMap, Router, RpcEnvironment, SubdirMap,
}; };
use proxmox_schema::{BooleanSchema, ObjectSchema}; use proxmox_schema::{BooleanSchema, ObjectSchema};
use proxmox_sys::sortable;
use pbs_api_types::{ use pbs_api_types::{
Authid, Operation, DATASTORE_SCHEMA, BACKUP_TYPE_SCHEMA, BACKUP_TIME_SCHEMA, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_ID_SCHEMA, CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
BACKUP_ARCHIVE_NAME_SCHEMA, PRIV_DATASTORE_READ,
}; };
use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_config::CachedUserInfo;
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::BackupDir; use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::index::IndexFile; use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType}; use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_config::CachedUserInfo; use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
use proxmox_rest_server::{WorkerTask, H2Service}; use pbs_tools::json::{required_integer_param, required_string_param};
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;
use crate::api2::helpers; use crate::api2::helpers;
mod environment; mod environment;
use environment::*; use environment::*;
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
.upgrade(&API_METHOD_UPGRADE_BACKUP);
#[sortable] #[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new( pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&upgrade_to_backup_reader_protocol), &ApiHandler::AsyncHttp(&upgrade_to_backup_reader_protocol),
&ObjectSchema::new( &ObjectSchema::new(
concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."), concat!(
"Upgraded to backup protocol ('",
PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(),
"')."
),
&sorted!([ &sorted!([
("store", false, &DATASTORE_SCHEMA), ("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA), ("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA), ("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA), ("backup-time", false, &BACKUP_TIME_SCHEMA),
("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()), (
"debug",
true,
&BooleanSchema::new("Enable verbose debug logging.").schema()
),
]), ]),
) ),
).access( )
.access(
// Note: parameter 'store' is not a URI parameter, so we need to test inside the function body // Note: parameter 'store' is not a URI parameter, so we need to test inside the function body
Some("The user needs Datastore.Read privilege on /datastore/{store}."), Some("The user needs Datastore.Read privilege on /datastore/{store}."),
&Permission::Anybody &Permission::Anybody,
); );
fn upgrade_to_backup_reader_protocol( fn upgrade_to_backup_reader_protocol(
@ -63,7 +71,6 @@ fn upgrade_to_backup_reader_protocol(
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>, rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture { ) -> ApiResponseFuture {
async move { async move {
let debug = param["debug"].as_bool().unwrap_or(false); let debug = param["debug"].as_bool().unwrap_or(false);
@ -91,14 +98,17 @@ fn upgrade_to_backup_reader_protocol(
.headers .headers
.get("UPGRADE") .get("UPGRADE")
.ok_or_else(|| format_err!("missing Upgrade header"))? .ok_or_else(|| format_err!("missing Upgrade header"))?
.to_str()?; .to_str()?;
if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() { if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() {
bail!("invalid protocol name"); bail!("invalid protocol name");
} }
if parts.version >= http::version::Version::HTTP_2 { if parts.version >= http::version::Version::HTTP_2 {
bail!("unexpected http version '{:?}' (expected version < 2)", parts.version); bail!(
"unexpected http version '{:?}' (expected version < 2)",
parts.version
);
} }
let env_type = rpcenv.env_type(); let env_type = rpcenv.env_type();
@ -107,8 +117,7 @@ fn upgrade_to_backup_reader_protocol(
if !priv_read { if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?; let owner = datastore.get_owner(backup_dir.group())?;
let correct_owner = owner == auth_id let correct_owner = owner == auth_id
|| (owner.is_token() || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
&& Authid::from(owner.user().clone()) == auth_id);
if !correct_owner { if !correct_owner {
bail!("backup owner check failed!"); bail!("backup owner check failed!");
} }
@ -117,83 +126,100 @@ fn upgrade_to_backup_reader_protocol(
let _guard = lock_dir_noblock_shared( let _guard = lock_dir_noblock_shared(
&datastore.snapshot_path(&backup_dir), &datastore.snapshot_path(&backup_dir),
"snapshot", "snapshot",
"locked by another operation")?; "locked by another operation",
)?;
let path = datastore.base_path(); let path = datastore.base_path();
//let files = BackupInfo::list_files(&path, &backup_dir)?; //let files = BackupInfo::list_files(&path, &backup_dir)?;
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time()); let worker_id = format!(
"{}:{}/{}/{:08X}",
store,
backup_type,
backup_id,
backup_dir.backup_time()
);
WorkerTask::spawn("reader", Some(worker_id), auth_id.to_string(), true, move |worker| async move { WorkerTask::spawn(
let _guard = _guard; "reader",
Some(worker_id),
auth_id.to_string(),
true,
move |worker| async move {
let _guard = _guard;
let mut env = ReaderEnvironment::new( let mut env = ReaderEnvironment::new(
env_type, env_type,
auth_id, auth_id,
worker.clone(), worker.clone(),
datastore, datastore,
backup_dir, backup_dir,
); );
env.debug = debug; env.debug = debug;
env.log(format!("starting new backup reader datastore '{}': {:?}", store, path)); env.log(format!(
"starting new backup reader datastore '{}': {:?}",
store, path
));
let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug); let service =
H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
let mut abort_future = worker.abort_future() let mut abort_future = worker
.map(|_| Err(format_err!("task aborted"))); .abort_future()
.map(|_| Err(format_err!("task aborted")));
let env2 = env.clone(); let env2 = env.clone();
let req_fut = async move { let req_fut = async move {
let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?; let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
env2.debug("protocol upgrade done"); env2.debug("protocol upgrade done");
let mut http = hyper::server::conn::Http::new(); let mut http = hyper::server::conn::Http::new();
http.http2_only(true); http.http2_only(true);
// increase window size: todo - find optimal size // increase window size: todo - find optimal size
let window_size = 32*1024*1024; // max = (1 << 31) - 2 let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
http.http2_initial_stream_window_size(window_size); http.http2_initial_stream_window_size(window_size);
http.http2_initial_connection_window_size(window_size); http.http2_initial_connection_window_size(window_size);
http.http2_max_frame_size(4*1024*1024); http.http2_max_frame_size(4 * 1024 * 1024);
http.serve_connection(conn, service) http.serve_connection(conn, service)
.map_err(Error::from).await .map_err(Error::from)
}; .await
};
futures::select!{ futures::select! {
req = req_fut.fuse() => req?, req = req_fut.fuse() => req?,
abort = abort_future => abort?, abort = abort_future => abort?,
}; };
env.log("reader finished successfully"); env.log("reader finished successfully");
Ok(()) Ok(())
})?; },
)?;
let response = Response::builder() let response = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS) .status(StatusCode::SWITCHING_PROTOCOLS)
.header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())) .header(
UPGRADE,
HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()),
)
.body(Body::empty())?; .body(Body::empty())?;
Ok(response) Ok(response)
}.boxed() }
.boxed()
} }
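The handler above pins down the HTTP/2 connection settings before serving the reader protocol. A hedged sketch of just that tuning, assuming hyper 0.14 with the server and http2 features:

fn tuned_http2() -> hyper::server::conn::Http {
    let mut http = hyper::server::conn::Http::new();
    http.http2_only(true);
    let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
    http.http2_initial_stream_window_size(window_size);
    http.http2_initial_connection_window_size(window_size);
    http.http2_max_frame_size(4 * 1024 * 1024);
    http
}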
const READER_API_SUBDIRS: SubdirMap = &[ const READER_API_SUBDIRS: SubdirMap = &[
("chunk", &Router::new().download(&API_METHOD_DOWNLOAD_CHUNK)),
( (
"chunk", &Router::new() "download",
.download(&API_METHOD_DOWNLOAD_CHUNK) &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
),
(
"download", &Router::new()
.download(&API_METHOD_DOWNLOAD_FILE)
),
(
"speedtest", &Router::new()
.download(&API_METHOD_SPEEDTEST)
), ),
("speedtest", &Router::new().download(&API_METHOD_SPEEDTEST)),
]; ];
pub const READER_API_ROUTER: Router = Router::new() pub const READER_API_ROUTER: Router = Router::new()
@ -205,10 +231,8 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&download_file), &ApiHandler::AsyncHttp(&download_file),
&ObjectSchema::new( &ObjectSchema::new(
"Download specified file.", "Download specified file.",
&sorted!([ &sorted!([("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA), ),
]),
)
); );
fn download_file( fn download_file(
@ -218,7 +242,6 @@ fn download_file(
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>, rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture { ) -> ApiResponseFuture {
async move { async move {
let env: &ReaderEnvironment = rpcenv.as_ref(); let env: &ReaderEnvironment = rpcenv.as_ref();
@ -239,11 +262,14 @@ fn download_file(
let index = env.datastore.open_dynamic_reader(&path)?; let index = env.datastore.open_dynamic_reader(&path)?;
Some(Box::new(index)) Some(Box::new(index))
} }
_ => { None } _ => None,
}; };
if let Some(index) = index { if let Some(index) = index {
env.log(format!("register chunks in '{}' as downloadable.", file_name)); env.log(format!(
"register chunks in '{}' as downloadable.",
file_name
));
for pos in 0..index.index_count() { for pos in 0..index.index_count() {
let info = index.chunk_info(pos).unwrap(); let info = index.chunk_info(pos).unwrap();
@ -252,7 +278,8 @@ fn download_file(
} }
helpers::create_download_response(path).await helpers::create_download_response(path).await
}.boxed() }
.boxed()
} }
#[sortable] #[sortable]
@ -260,10 +287,8 @@ pub const API_METHOD_DOWNLOAD_CHUNK: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&download_chunk), &ApiHandler::AsyncHttp(&download_chunk),
&ObjectSchema::new( &ObjectSchema::new(
"Download specified chunk.", "Download specified chunk.",
&sorted!([ &sorted!([("digest", false, &CHUNK_DIGEST_SCHEMA),]),
("digest", false, &CHUNK_DIGEST_SCHEMA), ),
]),
)
); );
fn download_chunk( fn download_chunk(
@ -273,7 +298,6 @@ fn download_chunk(
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>, rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture { ) -> ApiResponseFuture {
async move { async move {
let env: &ReaderEnvironment = rpcenv.as_ref(); let env: &ReaderEnvironment = rpcenv.as_ref();
@ -281,8 +305,15 @@ fn download_chunk(
let digest = <[u8; 32]>::from_hex(digest_str)?; let digest = <[u8; 32]>::from_hex(digest_str)?;
if !env.check_chunk_access(digest) { if !env.check_chunk_access(digest) {
env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str)); env.log(format!(
return Err(http_err!(UNAUTHORIZED, "download chunk {} not allowed", digest_str)); "attempted to download chunk {} which is not in registered chunk list",
digest_str
));
return Err(http_err!(
UNAUTHORIZED,
"download chunk {} not allowed",
digest_str
));
} }
let (path, _) = env.datastore.chunk_path(&digest); let (path, _) = env.datastore.chunk_path(&digest);
@ -290,18 +321,21 @@ fn download_chunk(
env.debug(format!("download chunk {:?}", path)); env.debug(format!("download chunk {:?}", path));
let data = proxmox_async::runtime::block_in_place(|| std::fs::read(path)) let data =
.map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?; proxmox_async::runtime::block_in_place(|| std::fs::read(path)).map_err(move |err| {
http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err)
})?;
let body = Body::from(data); let body = Body::from(data);
// fixme: set other headers ? // fixme: set other headers ?
Ok(Response::builder() Ok(Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)
.header(header::CONTENT_TYPE, "application/octet-stream") .header(header::CONTENT_TYPE, "application/octet-stream")
.body(body) .body(body)
.unwrap()) .unwrap())
}.boxed() }
.boxed()
} }
/* this is too slow /* this is too slow
@ -347,7 +381,7 @@ fn download_chunk_old(
pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new( pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&speedtest), &ApiHandler::AsyncHttp(&speedtest),
&ObjectSchema::new("Test 1M block download speed.", &[]) &ObjectSchema::new("Test 1M block download speed.", &[]),
); );
fn speedtest( fn speedtest(
@ -357,8 +391,7 @@ fn speedtest(
_info: &ApiMethod, _info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>, _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture { ) -> ApiResponseFuture {
let buffer = vec![65u8; 1024 * 1024]; // nonsense [A,A,A...]
let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A...]
let body = Body::from(buffer); let body = Body::from(buffer);
View File
@ -3,26 +3,20 @@
use anyhow::Error; use anyhow::Error;
use serde_json::Value; use serde_json::Value;
use proxmox_schema::api;
use proxmox_router::{
ApiMethod,
Permission,
Router,
RpcEnvironment,
SubdirMap,
};
use proxmox_router::list_subdirs_api_method; use proxmox_router::list_subdirs_api_method;
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
use pbs_api_types::{ use pbs_api_types::{
Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame, Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_BACKUP,
}; };
use pbs_datastore::DataStore;
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::DataStore;
use crate::tools::statistics::{linear_regression};
use crate::rrd_cache::extract_rrd_data; use crate::rrd_cache::extract_rrd_data;
use crate::tools::statistics::linear_regression;
#[api( #[api(
returns: { returns: {
@ -41,8 +35,7 @@ pub fn datastore_status(
_param: Value, _param: Value,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreStatusListItem>, Error> { ) -> Result<Vec<DataStoreStatusListItem>, Error> {
let (config, _digest) = pbs_config::datastore::config()?; let (config, _digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -52,7 +45,7 @@ pub fn datastore_status(
for (store, (_, _)) in &config.sections { for (store, (_, _)) in &config.sections {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0; let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if !allowed { if !allowed {
continue; continue;
} }
@ -90,12 +83,8 @@ pub fn datastore_status(
let rrd_dir = format!("datastore/{}", store); let rrd_dir = format!("datastore/{}", store);
let get_rrd = |what: &str| extract_rrd_data( let get_rrd =
&rrd_dir, |what: &str| extract_rrd_data(&rrd_dir, what, RRDTimeFrame::Month, RRDMode::Average);
what,
RRDTimeFrame::Month,
RRDMode::Average,
);
let total_res = get_rrd("total")?; let total_res = get_rrd("total")?;
let used_res = get_rrd("used")?; let used_res = get_rrd("used")?;
@ -114,14 +103,12 @@ pub fn datastore_status(
match (total, used) { match (total, used) {
(Some(total), Some(used)) if total != 0.0 => { (Some(total), Some(used)) if total != 0.0 => {
time_list.push(start + (idx as u64)*reso); time_list.push(start + (idx as u64) * reso);
let usage = used/total; let usage = used / total;
usage_list.push(usage); usage_list.push(usage);
history.push(Some(usage)); history.push(Some(usage));
},
_ => {
history.push(None)
} }
_ => history.push(None),
} }
} }
@ -145,9 +132,10 @@ pub fn datastore_status(
Ok(list.into()) Ok(list.into())
} }
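datastore_status feeds the usage history collected above into linear_regression to estimate when a store fills up. A hedged standalone least-squares fit over (time, usage) samples, returning slope and intercept (an illustration, not the crate's implementation):

fn linear_fit(points: &[(f64, f64)]) -> Option<(f64, f64)> {
    if points.is_empty() {
        return None;
    }
    let n = points.len() as f64;
    let (sx, sy) = points
        .iter()
        .fold((0.0, 0.0), |(a, b), (x, y)| (a + x, b + y));
    let (mx, my) = (sx / n, sy / n);
    let mut num = 0.0;
    let mut den = 0.0;
    for (x, y) in points {
        num += (x - mx) * (y - my);
        den += (x - mx) * (x - mx);
    }
    if den == 0.0 {
        return None;
    }
    let slope = num / den;
    Some((slope, my - slope * mx))
}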
const SUBDIRS: SubdirMap = &[ const SUBDIRS: SubdirMap = &[(
("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)), "datastore-usage",
]; &Router::new().get(&API_METHOD_DATASTORE_STATUS),
)];
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS)) .get(&list_subdirs_api_method!(SUBDIRS))
View File
@ -1,11 +1,9 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::Value; use serde_json::Value;
use proxmox_schema::{api, ApiType, Schema, StringSchema, ApiStringFormat}; use proxmox_schema::{api, ApiStringFormat, ApiType, Schema, StringSchema};
use pbs_api_types::{ use pbs_api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT};
DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT,
};
#[api( #[api(
properties: { properties: {
@ -41,10 +39,10 @@ pub struct AcmeDomain {
pub plugin: Option<String>, pub plugin: Option<String>,
} }
pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema = StringSchema::new( pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema =
"ACME domain configuration string") StringSchema::new("ACME domain configuration string")
.format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA)) .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
.schema(); .schema();
#[api( #[api(
properties: { properties: {
View File
@ -21,12 +21,10 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
Ok(()) Ok(())
}); });
// Regression tests // Regression tests
#[test] #[test]
fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> { fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
let schema = pbs_api_types::CERT_FINGERPRINT_SHA256_SCHEMA; let schema = pbs_api_types::CERT_FINGERPRINT_SHA256_SCHEMA;
let invalid_fingerprints = [ let invalid_fingerprints = [
@ -40,7 +38,10 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
for fingerprint in invalid_fingerprints.iter() { for fingerprint in invalid_fingerprints.iter() {
if schema.parse_simple_value(fingerprint).is_ok() { if schema.parse_simple_value(fingerprint).is_ok() {
bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint); bail!(
"test fingerprint '{}' failed - got Ok() while expecting an error.",
fingerprint
);
} }
} }
@ -58,7 +59,11 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
}; };
if v != serde_json::json!(fingerprint) { if v != serde_json::json!(fingerprint) {
bail!("unable to parse fingerprint '{}' - got wrong value {:?}", fingerprint, v); bail!(
"unable to parse fingerprint '{}' - got wrong value {:?}",
fingerprint,
v
);
} }
} }
@ -67,24 +72,26 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
#[test] #[test]
fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> { fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
use pbs_api_types::Userid; use pbs_api_types::Userid;
let invalid_user_ids = [ let invalid_user_ids = [
"x", // too short "x", // too short
"xx", // too short "xx", // too short
"xxx", // no realm "xxx", // no realm
"xxx@", // no realm "xxx@", // no realm
"xx x@test", // contains space "xx x@test", // contains space
"xx\nx@test", // contains control character "xx\nx@test", // contains control character
"x:xx@test", // contains collon "x:xx@test", // contains collon
"xx/x@test", // contains slash "xx/x@test", // contains slash
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@test", // too long "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@test", // too long
]; ];
for name in invalid_user_ids.iter() { for name in invalid_user_ids.iter() {
if Userid::API_SCHEMA.parse_simple_value(name).is_ok() { if Userid::API_SCHEMA.parse_simple_value(name).is_ok() {
bail!("test userid '{}' failed - got Ok() while expecting an error.", name); bail!(
"test userid '{}' failed - got Ok() while expecting an error.",
name
);
} }
} }
@ -105,7 +112,11 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
}; };
if v != serde_json::json!(name) { if v != serde_json::json!(name) {
bail!("unable to parse userid '{}' - got wrong value {:?}", name, v); bail!(
"unable to parse userid '{}' - got wrong value {:?}",
name,
v
);
} }
} }
@ -139,7 +150,7 @@ pub struct NodeSwapCounters {
} }
#[api] #[api]
#[derive(Serialize,Deserialize,Default)] #[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint /// Contains general node information such as the fingerprint
pub struct NodeInformation { pub struct NodeInformation {
@ -207,13 +218,13 @@ pub struct NodeStatus {
pub info: NodeInformation, pub info: NodeInformation,
} }
pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new( pub const HTTP_PROXY_SCHEMA: Schema =
"HTTP proxy configuration [http://]<host>[:port]") StringSchema::new("HTTP proxy configuration [http://]<host>[:port]")
.format(&ApiStringFormat::VerifyFn(|s| { .format(&ApiStringFormat::VerifyFn(|s| {
proxmox_http::ProxyConfig::parse_proxy_url(s)?; proxmox_http::ProxyConfig::parse_proxy_url(s)?;
Ok(()) Ok(())
})) }))
.min_length(1) .min_length(1)
.max_length(128) .max_length(128)
.type_text("[http://]<host>[:port]") .type_text("[http://]<host>[:port]")
.schema(); .schema();
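The VerifyFn above delegates to proxmox_http's proxy-URL parser. A rough standalone check in the same spirit, derived only from the [http://]<host>[:port] type text (so deliberately simpler than the real parser):

fn verify_http_proxy(s: &str) -> Result<(), String> {
    let rest = s.strip_prefix("http://").unwrap_or(s);
    let (host, port) = match rest.rsplit_once(':') {
        Some((host, port)) => (host, Some(port)),
        None => (rest, None),
    };
    if host.is_empty() {
        return Err(String::from("missing host"));
    }
    if let Some(port) = port {
        port.parse::<u16>()
            .map_err(|_| String::from("invalid port"))?;
    }
    Ok(())
}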
View File
@ -1,9 +1,9 @@
//! Version information //! Version information
use anyhow::{Error}; use anyhow::Error;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission}; use proxmox_router::{ApiHandler, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::ObjectSchema; use proxmox_schema::ObjectSchema;
fn get_version( fn get_version(
@ -11,7 +11,6 @@ fn get_version(
_info: &ApiMethod, _info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment, _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
Ok(json!({ Ok(json!({
"version": pbs_buildcfg::PROXMOX_PKG_VERSION, "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
"release": pbs_buildcfg::PROXMOX_PKG_RELEASE, "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
@ -19,11 +18,10 @@ fn get_version(
})) }))
} }
pub const ROUTER: Router = Router::new() pub const ROUTER: Router = Router::new().get(
.get( &ApiMethod::new(
&ApiMethod::new( &ApiHandler::Sync(&get_version),
&ApiHandler::Sync(&get_version), &ObjectSchema::new("Proxmox Backup Server API version.", &[]),
&ObjectSchema::new("Proxmox Backup Server API version.", &[]) )
).access(None, &Permission::Anybody) .access(None, &Permission::Anybody),
); );