@@ -1,12 +1,12 @@
use anyhow::{Error, bail, format_err};
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;

use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_router::{
    list_subdirs_api_method, RpcEnvironment, RpcEnvironmentType, Permission, Router, SubdirMap
    list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::api;
use proxmox_sys::fs::{replace_file, CreateOptions};

use proxmox_apt::repositories::{
    APTRepositoryFile, APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo,
@@ -15,17 +15,13 @@ use proxmox_apt::repositories::{
use proxmox_http::ProxyConfig;

use pbs_api_types::{
    APTUpdateInfo, NODE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
    UPID_SCHEMA,
};

use crate::config::node;
use crate::tools::{apt, pbs_simple_http, subscription};
use proxmox_rest_server::WorkerTask;
use crate::tools::{
    apt,
    pbs_simple_http,
    subscription,
};

#[api(
    input: {
@@ -49,7 +45,6 @@ use crate::tools::{
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {

    if let Ok(false) = apt::pkg_cache_expired() {
        if let Ok(Some(cache)) = apt::read_pkg_state() {
            return Ok(json!(cache.package_status));
@@ -62,7 +57,6 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
}

pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {

    const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE

    if let Some(proxy_config) = proxy_config {
@@ -90,7 +84,9 @@ fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
}

fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    if !quiet { worker.log_message("starting apt-get update") }
    if !quiet {
        worker.log_message("starting apt-get update")
    }

    read_and_update_proxy_config()?;

@@ -98,7 +94,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    command.arg("update");

    // apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now.
    let output = command.output()
    let output = command
        .output()
        .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;

    if !quiet {
@@ -109,7 +106,13 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
    if !output.status.success() {
        if output.status.code().is_some() {
            let msg = String::from_utf8(output.stderr)
                .map(|m| if m.is_empty() { String::from("no error message") } else { m })
                .map(|m| {
                    if m.is_empty() {
                        String::from("no error message")
                    } else {
                        m
                    }
                })
                .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
            worker.log_warning(msg);
        } else {
@@ -154,7 +157,6 @@ pub fn apt_update_database(
    quiet: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id = rpcenv.get_auth_id().unwrap();
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

@@ -176,7 +178,7 @@ pub fn apt_update_database(
                        if notified_version != pkg.version {
                            to_notify.push(pkg);
                        }
                    },
                    }
                    None => to_notify.push(pkg),
                }
            }
@@ -220,19 +222,17 @@ pub fn apt_update_database(
    },
)]
/// Retrieve the changelog of the specified package.
fn apt_get_changelog(
    param: Value,
) -> Result<Value, Error> {

fn apt_get_changelog(param: Value) -> Result<Value, Error> {
    let name = pbs_tools::json::required_string_param(&param, "name")?.to_owned();
    let version = param["version"].as_str();

    let pkg_info = apt::list_installed_apt_packages(|data| {
        match version {
    let pkg_info = apt::list_installed_apt_packages(
        |data| match version {
            Some(version) => version == data.active_version,
            None => data.active_version == data.candidate_version
        }
    }, Some(&name));
            None => data.active_version == data.candidate_version,
        },
        Some(&name),
    );

    if pkg_info.is_empty() {
        bail!("Package '{}' not found", name);
@@ -245,33 +245,47 @@ fn apt_get_changelog(
    // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
    if changelog_url.starts_with("http://download.proxmox.com/") {
        let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, None))
            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
            .map_err(|err| {
                format_err!(
                    "Error downloading changelog from '{}': {}",
                    changelog_url,
                    err
                )
            })?;
        Ok(json!(changelog))

    } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
        let sub = match subscription::read_subscription()? {
            Some(sub) => sub,
            None => bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
            None => {
                bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
            }
        };
        let (key, id) = match sub.key {
            Some(key) => {
                match sub.serverid {
                    Some(id) => (key, id),
                    None =>
                        bail!("cannot retrieve changelog from enterprise repo: no server id found")
                }
            Some(key) => match sub.serverid {
                Some(id) => (key, id),
                None => bail!("cannot retrieve changelog from enterprise repo: no server id found"),
            },
            None => bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
            None => {
                bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
            }
        };

        let mut auth_header = HashMap::new();
        auth_header.insert("Authorization".to_owned(),
            format!("Basic {}", base64::encode(format!("{}:{}", key, id))));
        auth_header.insert(
            "Authorization".to_owned(),
            format!("Basic {}", base64::encode(format!("{}:{}", key, id))),
        );

        let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
        let changelog =
            proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
                .map_err(|err| {
                    format_err!(
                        "Error downloading changelog from '{}': {}",
                        changelog_url,
                        err
                    )
                })?;
        Ok(json!(changelog))

    } else {
        let mut command = std::process::Command::new("apt-get");
        command.arg("changelog");
@@ -348,23 +362,35 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
|         "running kernel: {}", | ||||
|         nix::sys::utsname::uname().release().to_owned() | ||||
|     ); | ||||
|     if let Some(proxmox_backup) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup") { | ||||
|     if let Some(proxmox_backup) = pbs_packages | ||||
|         .iter() | ||||
|         .find(|pkg| pkg.package == "proxmox-backup") | ||||
|     { | ||||
|         let mut proxmox_backup = proxmox_backup.clone(); | ||||
|         proxmox_backup.extra_info = Some(running_kernel); | ||||
|         packages.push(proxmox_backup); | ||||
|     } else { | ||||
|         packages.push(unknown_package("proxmox-backup".into(), Some(running_kernel))); | ||||
|         packages.push(unknown_package( | ||||
|             "proxmox-backup".into(), | ||||
|             Some(running_kernel), | ||||
|         )); | ||||
|     } | ||||
|  | ||||
|     let version = pbs_buildcfg::PROXMOX_PKG_VERSION; | ||||
|     let release = pbs_buildcfg::PROXMOX_PKG_RELEASE; | ||||
|     let daemon_version_info = Some(format!("running version: {}.{}", version, release)); | ||||
|     if let Some(pkg) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup-server") { | ||||
|     if let Some(pkg) = pbs_packages | ||||
|         .iter() | ||||
|         .find(|pkg| pkg.package == "proxmox-backup-server") | ||||
|     { | ||||
|         let mut pkg = pkg.clone(); | ||||
|         pkg.extra_info = daemon_version_info; | ||||
|         packages.push(pkg); | ||||
|     } else { | ||||
|         packages.push(unknown_package("proxmox-backup".into(), daemon_version_info)); | ||||
|         packages.push(unknown_package( | ||||
|             "proxmox-backup".into(), | ||||
|             daemon_version_info, | ||||
|         )); | ||||
|     } | ||||
|  | ||||
|     let mut kernel_pkgs: Vec<APTUpdateInfo> = pbs_packages | ||||
| @ -609,15 +635,22 @@ pub fn change_repository( | ||||
| } | ||||
|  | ||||
| const SUBDIRS: SubdirMap = &[ | ||||
|     ("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)), | ||||
|     ("repositories", &Router::new() | ||||
|         .get(&API_METHOD_GET_REPOSITORIES) | ||||
|         .post(&API_METHOD_CHANGE_REPOSITORY) | ||||
|         .put(&API_METHOD_ADD_REPOSITORY) | ||||
|     ( | ||||
|         "changelog", | ||||
|         &Router::new().get(&API_METHOD_APT_GET_CHANGELOG), | ||||
|     ), | ||||
|     ("update", &Router::new() | ||||
|         .get(&API_METHOD_APT_UPDATE_AVAILABLE) | ||||
|         .post(&API_METHOD_APT_UPDATE_DATABASE) | ||||
|     ( | ||||
|         "repositories", | ||||
|         &Router::new() | ||||
|             .get(&API_METHOD_GET_REPOSITORIES) | ||||
|             .post(&API_METHOD_CHANGE_REPOSITORY) | ||||
|             .put(&API_METHOD_ADD_REPOSITORY), | ||||
|     ), | ||||
|     ( | ||||
|         "update", | ||||
|         &Router::new() | ||||
|             .get(&API_METHOD_APT_UPDATE_AVAILABLE) | ||||
|             .post(&API_METHOD_APT_UPDATE_DATABASE), | ||||
|     ), | ||||
|     ("versions", &Router::new().get(&API_METHOD_GET_VERSIONS)), | ||||
| ]; | ||||
|  | ||||
| @ -7,9 +7,9 @@ use openssl::pkey::PKey; | ||||
use openssl::x509::X509;
use serde::{Deserialize, Serialize};

use proxmox_router::list_subdirs_api_method;
use proxmox_router::SubdirMap;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_router::list_subdirs_api_method;
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn};

@@ -305,7 +305,10 @@ async fn order_certificate(
    };

    if domains.is_empty() {
        task_log!(worker, "No domains configured to be ordered from an ACME server.");
        task_log!(
            worker,
            "No domains configured to be ordered from an ACME server."
        );
        return Ok(None);
    }

@@ -363,7 +366,9 @@ async fn order_certificate(
            task_warn!(
                worker,
                "Failed to teardown plugin '{}' for domain '{}' - {}",
                plugin_id, domain, err
                plugin_id,
                domain,
                err
            );
        }

@@ -453,7 +458,10 @@ async fn request_validation(
        let auth = acme.get_authorization(auth_url).await?;
        match auth.status {
            Status::Pending => {
                task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
                task_log!(
                    worker,
                    "Status is still 'pending', trying again in 10 seconds"
                );
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
            Status::Valid => return Ok(()),
@@ -574,7 +582,10 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error
            let mut acme = node_config.acme_client().await?;
            task_log!(worker, "Revoking old certificate");
            acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
            task_log!(worker, "Deleting certificate and regenerating a self-signed one");
            task_log!(
                worker,
                "Deleting certificate and regenerating a self-signed one"
            );
            delete_custom_certificate().await?;
            Ok(())
        },

@@ -1,5 +1,5 @@
use anyhow::Error;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;

use proxmox_router::{Permission, Router, RpcEnvironment};
@@ -36,7 +36,7 @@ pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
@@ -57,10 +57,10 @@ pub enum DeletableProperty {
    /// Delete the email-from property.
    email_from,
    /// Delete the ciphers-tls-1.3 property.
    #[serde(rename="ciphers-tls-1.3")]
    #[serde(rename = "ciphers-tls-1.3")]
    ciphers_tls_1_3,
    /// Delete the ciphers-tls-1.2 property.
    #[serde(rename="ciphers-tls-1.2")]
    #[serde(rename = "ciphers-tls-1.2")]
    ciphers_tls_1_2,
    /// Delete the default-lang property.
    default_lang,
@@ -117,36 +117,88 @@ pub fn update_node_config(
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::acme => { config.acme = None; },
                DeletableProperty::acmedomain0 => { config.acmedomain0 = None; },
                DeletableProperty::acmedomain1 => { config.acmedomain1 = None; },
                DeletableProperty::acmedomain2 => { config.acmedomain2 = None; },
                DeletableProperty::acmedomain3 => { config.acmedomain3 = None; },
                DeletableProperty::acmedomain4 => { config.acmedomain4 = None; },
                DeletableProperty::http_proxy => { config.http_proxy = None; },
                DeletableProperty::email_from => { config.email_from = None; },
                DeletableProperty::ciphers_tls_1_3 => { config.ciphers_tls_1_3 = None; },
                DeletableProperty::ciphers_tls_1_2 => { config.ciphers_tls_1_2 = None; },
                DeletableProperty::default_lang => { config.default_lang = None; },
                DeletableProperty::description => { config.description = None; },
                DeletableProperty::task_log_max_days => { config.task_log_max_days = None; },
                DeletableProperty::acme => {
                    config.acme = None;
                }
                DeletableProperty::acmedomain0 => {
                    config.acmedomain0 = None;
                }
                DeletableProperty::acmedomain1 => {
                    config.acmedomain1 = None;
                }
                DeletableProperty::acmedomain2 => {
                    config.acmedomain2 = None;
                }
                DeletableProperty::acmedomain3 => {
                    config.acmedomain3 = None;
                }
                DeletableProperty::acmedomain4 => {
                    config.acmedomain4 = None;
                }
                DeletableProperty::http_proxy => {
                    config.http_proxy = None;
                }
                DeletableProperty::email_from => {
                    config.email_from = None;
                }
                DeletableProperty::ciphers_tls_1_3 => {
                    config.ciphers_tls_1_3 = None;
                }
                DeletableProperty::ciphers_tls_1_2 => {
                    config.ciphers_tls_1_2 = None;
                }
                DeletableProperty::default_lang => {
                    config.default_lang = None;
                }
                DeletableProperty::description => {
                    config.description = None;
                }
                DeletableProperty::task_log_max_days => {
                    config.task_log_max_days = None;
                }
            }
        }
    }

    if update.acme.is_some() { config.acme = update.acme; }
    if update.acmedomain0.is_some() { config.acmedomain0 = update.acmedomain0; }
    if update.acmedomain1.is_some() { config.acmedomain1 = update.acmedomain1; }
    if update.acmedomain2.is_some() { config.acmedomain2 = update.acmedomain2; }
    if update.acmedomain3.is_some() { config.acmedomain3 = update.acmedomain3; }
    if update.acmedomain4.is_some() { config.acmedomain4 = update.acmedomain4; }
    if update.http_proxy.is_some() { config.http_proxy = update.http_proxy; }
    if update.email_from.is_some() { config.email_from = update.email_from; }
    if update.ciphers_tls_1_3.is_some() { config.ciphers_tls_1_3 = update.ciphers_tls_1_3; }
    if update.ciphers_tls_1_2.is_some() { config.ciphers_tls_1_2 = update.ciphers_tls_1_2; }
    if update.default_lang.is_some() { config.default_lang = update.default_lang; }
    if update.description.is_some() { config.description = update.description; }
    if update.task_log_max_days.is_some() { config.task_log_max_days = update.task_log_max_days; }
    if update.acme.is_some() {
        config.acme = update.acme;
    }
    if update.acmedomain0.is_some() {
        config.acmedomain0 = update.acmedomain0;
    }
    if update.acmedomain1.is_some() {
        config.acmedomain1 = update.acmedomain1;
    }
    if update.acmedomain2.is_some() {
        config.acmedomain2 = update.acmedomain2;
    }
    if update.acmedomain3.is_some() {
        config.acmedomain3 = update.acmedomain3;
    }
    if update.acmedomain4.is_some() {
        config.acmedomain4 = update.acmedomain4;
    }
    if update.http_proxy.is_some() {
        config.http_proxy = update.http_proxy;
    }
    if update.email_from.is_some() {
        config.email_from = update.email_from;
    }
    if update.ciphers_tls_1_3.is_some() {
        config.ciphers_tls_1_3 = update.ciphers_tls_1_3;
    }
    if update.ciphers_tls_1_2.is_some() {
        config.ciphers_tls_1_2 = update.ciphers_tls_1_2;
    }
    if update.default_lang.is_some() {
        config.default_lang = update.default_lang;
    }
    if update.description.is_some() {
        config.description = update.description;
    }
    if update.task_log_max_days.is_some() {
        config.task_log_max_days = update.task_log_max_days;
    }

    crate::config::node::save_config(&config)?;


@@ -1,20 +1,20 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use serde_json::json;
use ::serde::{Deserialize, Serialize};

use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_section_config::SectionConfigData;
use proxmox_sys::task_log;

use pbs_api_types::{
    DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
    DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT,
    PRIV_SYS_MODIFY, UPID_SCHEMA,
};

use crate::tools::disks::{
    DiskManage, FileSystemType, DiskUsageType,
    create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
    create_file_system, create_single_linux_partition, get_disk_usage_info, get_fs_uuid,
    DiskManage, DiskUsageType, FileSystemType,
};
use crate::tools::systemd::{self, types::*};

@@ -31,7 +31,7 @@ const BASE_MOUNT_DIR: &str = "/mnt/datastore/";
    },
)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
#[serde(rename_all = "kebab-case")]
/// Datastore mount info.
pub struct DatastoreMountInfo {
    /// The path of the mount unit.
@@ -69,8 +69,7 @@ pub struct DatastoreMountInfo {
    },
)]
/// List systemd datastore mount units.
pub fn  list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {

pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
    lazy_static::lazy_static! {
        static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
    }
@@ -144,7 +143,6 @@ pub fn create_datastore_disk(
    filesystem: Option<FileSystemType>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let auth_id = rpcenv.get_auth_id().unwrap();
@@ -161,15 +159,18 @@ pub fn create_datastore_disk(
    let default_path = std::path::PathBuf::from(&mount_point);

    match std::fs::metadata(&default_path) {
        Err(_) => {}, // path does not exist
        Err(_) => {} // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
        {
        "dircreate",
        Some(name.clone()),
        auth_id,
        to_stdout,
        move |worker| {
            task_log!(worker, "create datastore '{}' on disk {}", name, disk);

            let add_datastore = add_datastore.unwrap_or(false);
@@ -185,7 +186,8 @@ pub fn create_datastore_disk(
            let uuid = get_fs_uuid(&partition)?;
            let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);

            let mount_unit_name = create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
            let mount_unit_name =
                create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;

            crate::tools::systemd::reload_daemon()?;
            crate::tools::systemd::enable_unit(&mount_unit_name)?;
@@ -202,11 +204,17 @@ pub fn create_datastore_disk(
                    bail!("datastore '{}' already exists.", datastore.name);
                }

                crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
                crate::api2::config::datastore::do_create_datastore(
                    lock,
                    config,
                    datastore,
                    Some(&worker),
                )?;
            }

            Ok(())
        })?;
        },
    )?;

    Ok(upid_str)
}
@@ -229,17 +237,19 @@ pub fn create_datastore_disk(
)]
/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.".
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {

    let path = format!("{}{}", BASE_MOUNT_DIR, name);
    // path of datastore cannot be changed
    let (config, _) = pbs_config::datastore::config()?;
    let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
    let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
        .find(|ds| ds.path == path);
    let conflicting_datastore: Option<DataStoreConfig> =
        datastores.into_iter().find(|ds| ds.path == path);

    if let Some(conflicting_datastore) = conflicting_datastore {
        bail!("Can't remove '{}' since it's required by datastore '{}'",
              conflicting_datastore.path, conflicting_datastore.name);
        bail!(
            "Can't remove '{}' since it's required by datastore '{}'",
            conflicting_datastore.path,
            conflicting_datastore.name
        );
    }

    // disable systemd mount-unit
@@ -262,33 +272,33 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
             until the next reboot or until unmounted manually!",
            path
        ),
        Ok(_) => Ok(())
        Ok(_) => Ok(()),
    }
}

const ITEM_ROUTER: Router = Router::new()
    .delete(&API_METHOD_DELETE_DATASTORE_DISK);
const ITEM_ROUTER: Router = Router::new().delete(&API_METHOD_DELETE_DATASTORE_DISK);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
    .post(&API_METHOD_CREATE_DATASTORE_DISK)
    .match_all("name", &ITEM_ROUTER);


fn create_datastore_mount_unit(
    datastore_name: &str,
    mount_point: &str,
    fs_type: FileSystemType,
    what: &str,
) -> Result<String, Error> {

    let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
    mount_unit_name.push_str(".mount");

    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);

    let unit = SystemdUnitSection {
        Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
        Description: format!(
            "Mount datatstore '{}' under '{}'",
            datastore_name, mount_point
        ),
        ..Default::default()
    };


@@ -1,25 +1,22 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::task_log;

use pbs_api_types::{
    ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
    NODE_SCHEMA, ZPOOL_NAME_SCHEMA, DATASTORE_SCHEMA, DISK_ARRAY_SCHEMA,
    DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    DataStoreConfig, ZfsCompressionType, ZfsRaidLevel, ZpoolListItem, DATASTORE_SCHEMA,
    DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
    ZFS_ASHIFT_SCHEMA, ZPOOL_NAME_SCHEMA,
};

use crate::tools::disks::{
    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
    DiskUsageType,
    parse_zpool_status_config_tree, vdev_list_to_tree, zpool_list, zpool_status, DiskUsageType,
};

use proxmox_rest_server::WorkerTask;


#[api(
    protected: true,
    input: {
@@ -42,7 +39,6 @@ use proxmox_rest_server::WorkerTask;
)]
/// List zfs pools.
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {

    let data = zpool_list(None, false)?;

    let mut list = Vec::new();
@@ -87,15 +83,12 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
    },
)]
/// Get zpool status details.
pub fn zpool_details(
    name: String,
) -> Result<Value, Error> {

pub fn zpool_details(name: String) -> Result<Value, Error> {
    let key_value_list = zpool_status(&name)?;

    let config = match key_value_list.iter().find(|(k, _)| k == "config") {
        Some((_, v)) => v,
        None =>  bail!("got zpool status without config key"),
        None => bail!("got zpool status without config key"),
    };

    let vdev_list = parse_zpool_status_config_tree(config)?;
@@ -107,11 +100,12 @@ pub fn zpool_details(
        }
    }

    tree["name"] = tree.as_object_mut().unwrap()
    tree["name"] = tree
        .as_object_mut()
        .unwrap()
        .remove("pool")
        .unwrap_or_else(|| name.into());


    Ok(tree)
}

@@ -163,7 +157,6 @@ pub fn create_zpool(
    add_datastore: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let auth_id = rpcenv.get_auth_id().unwrap();
@@ -174,8 +167,12 @@ pub fn create_zpool(

    let devices_text = devices.clone();
    let devices = DISK_ARRAY_SCHEMA.parse_property_string(&devices)?;
    let devices: Vec<String> = devices.as_array().unwrap().iter()
        .map(|v| v.as_str().unwrap().to_string()).collect();
    let devices: Vec<String> = devices
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap().to_string())
        .collect();

    let disk_map = crate::tools::disks::get_disks(None, true)?;
    for disk in devices.iter() {
@@ -220,20 +217,35 @@ pub fn create_zpool(
    let default_path = std::path::PathBuf::from(&mount_point);

    match std::fs::metadata(&default_path) {
        Err(_) => {}, // path does not exist
        Err(_) => {} // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

     let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
        {
            task_log!(worker, "create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text);

    let upid_str = WorkerTask::new_thread(
        "zfscreate",
        Some(name.clone()),
        auth_id,
        to_stdout,
        move |worker| {
            task_log!(
                worker,
                "create {:?} zpool '{}' on devices '{}'",
                raidlevel,
                name,
                devices_text
            );

            let mut command = std::process::Command::new("zpool");
            command.args(&["create", "-o", &format!("ashift={}", ashift), "-m", &mount_point, &name]);
            command.args(&[
                "create",
                "-o",
                &format!("ashift={}", ashift),
                "-m",
                &mount_point,
                &name,
            ]);

            match raidlevel {
                ZfsRaidLevel::Single => {
@@ -244,10 +256,10 @@ pub fn create_zpool(
                    command.args(devices);
                }
                ZfsRaidLevel::Raid10 => {
                     devices.chunks(2).for_each(|pair| {
                         command.arg("mirror");
                         command.args(pair);
                     });
                    devices.chunks(2).for_each(|pair| {
                        command.arg("mirror");
                        command.args(pair);
                    });
                }
                ZfsRaidLevel::RaidZ => {
                    command.arg("raidz");
@@ -269,7 +281,10 @@ pub fn create_zpool(
            task_log!(worker, "{}", output);

            if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
                let import_unit = format!("zfs-import@{}.service", proxmox_sys::systemd::escape_unit(&name, false));
                let import_unit = format!(
                    "zfs-import@{}.service",
                    proxmox_sys::systemd::escape_unit(&name, false)
                );
                crate::tools::systemd::enable_unit(&import_unit)?;
            }

@@ -294,17 +309,22 @@ pub fn create_zpool(
                    bail!("datastore '{}' already exists.", datastore.name);
                }

                crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
                crate::api2::config::datastore::do_create_datastore(
                    lock,
                    config,
                    datastore,
                    Some(&worker),
                )?;
            }

            Ok(())
        })?;
        },
    )?;

    Ok(upid_str)
}

pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);
pub const POOL_ROUTER: Router = Router::new().get(&API_METHOD_ZPOOL_DETAILS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)

@@ -1,21 +1,21 @@
use std::sync::{Arc, Mutex};

use anyhow::{Error};
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use pbs_api_types::{IPRE, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use pbs_api_types::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

use pbs_api_types::{
    PROXMOX_CONFIG_DIGEST_SCHEMA, FIRST_DNS_SERVER_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
    THIRD_DNS_SERVER_SCHEMA, NODE_SCHEMA, SEARCH_DOMAIN_SCHEMA,
    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    FIRST_DNS_SERVER_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    PROXMOX_CONFIG_DIGEST_SCHEMA, SEARCH_DOMAIN_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
    THIRD_DNS_SERVER_SCHEMA,
};

static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
@@ -34,7 +34,6 @@ pub enum DeletableProperty {
}

pub fn read_etc_resolv_conf() -> Result<Value, Error> {

    let mut result = json!({});

    let mut nscount = 0;
@@ -47,24 +46,27 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {

    lazy_static! {
        static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
        static ref SERVER_REGEX: Regex = Regex::new(
            concat!(r"^\s*nameserver\s+(", IPRE!(),  r")\s*")).unwrap();
        static ref SERVER_REGEX: Regex =
            Regex::new(concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
    }

    let mut options = String::new();

    for line in data.lines() {

        if let Some(caps) = DOMAIN_REGEX.captures(line) {
            result["search"] = Value::from(&caps[1]);
        } else if let Some(caps) = SERVER_REGEX.captures(line) {
            nscount += 1;
            if nscount > 3 { continue };
            if nscount > 3 {
                continue;
            };
            let nameserver = &caps[1];
            let id = format!("dns{}", nscount);
            result[id] = Value::from(nameserver);
        } else {
            if !options.is_empty() { options.push('\n'); }
            if !options.is_empty() {
                options.push('\n');
            }
            options.push_str(line);
        }
    }
@@ -127,7 +129,6 @@ pub fn update_dns(
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
) -> Result<Value, Error> {

    lazy_static! {
        static ref MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
    }
@@ -145,17 +146,31 @@ pub fn update_dns(
        for delete_prop in delete {
            let config = config.as_object_mut().unwrap();
            match delete_prop {
                DeletableProperty::dns1 => { config.remove("dns1"); },
                DeletableProperty::dns2 => { config.remove("dns2"); },
                DeletableProperty::dns3 => { config.remove("dns3"); },
                DeletableProperty::dns1 => {
                    config.remove("dns1");
                }
                DeletableProperty::dns2 => {
                    config.remove("dns2");
                }
                DeletableProperty::dns3 => {
                    config.remove("dns3");
                }
            }
        }
    }

    if let Some(search) = search { config["search"] = search.into(); }
    if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
    if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
    if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }
    if let Some(search) = search {
        config["search"] = search.into();
    }
    if let Some(dns1) = dns1 {
        config["dns1"] = dns1.into();
    }
    if let Some(dns2) = dns2 {
        config["dns2"] = dns2.into();
    }
    if let Some(dns3) = dns3 {
        config["dns3"] = dns3.into();
    }

    let mut data = String::new();

@@ -219,7 +234,6 @@ pub fn get_dns(
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    read_etc_resolv_conf()
}


@@ -1,10 +1,10 @@
use std::process::{Command, Stdio};

use anyhow::{Error};
use anyhow::Error;
use serde_json::{json, Value};
use std::io::{BufRead,BufReader};
use std::io::{BufRead, BufReader};

use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;

use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
@@ -69,7 +69,6 @@ fn get_journal(
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let mut args = vec![];

    if let Some(lastentries) = lastentries {
@@ -127,5 +126,4 @@ fn get_journal(
    Ok(json!(lines))
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_JOURNAL);
pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_JOURNAL);

@@ -12,23 +12,23 @@ use hyper::Request;
use serde_json::{json, Value};
use tokio::io::{AsyncBufReadExt, BufReader};

use proxmox_sys::sortable;
use proxmox_sys::fd::fd_change_cloexec;
use proxmox_sys::sortable;

use proxmox_http::websocket::WebSocket;
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment, Router, SubdirMap,
    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
use proxmox_router::list_subdirs_api_method;
use proxmox_http::websocket::WebSocket;

use proxmox_rest_server::WorkerTask;

use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_CONSOLE};
use pbs_tools::ticket::{self, Empty, Ticket};

use crate::tools;
use crate::auth_helpers::private_auth_key;
use crate::tools;

pub mod apt;
pub mod certificates;
@@ -303,7 +303,7 @@ fn upgrade_to_websocket(
                .map_err(Error::from)
                .await
            {
               Ok(upgraded) => upgraded,
                Ok(upgraded) => upgraded,
                _ => bail!("error"),
            };


@@ -1,16 +1,16 @@
use anyhow::{Error, bail};
use serde::{Deserialize, Serialize};
use serde_json::{Value, to_value};
use anyhow::{bail, Error};
use hex::FromHex;
use serde::{Deserialize, Serialize};
use serde_json::{to_value, Value};

use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;

use pbs_api_types::{
    Authid, Interface, NetworkInterfaceType, LinuxBondMode, NetworkConfigMethod, BondXmitHashPolicy,
    Authid, BondXmitHashPolicy, Interface, LinuxBondMode, NetworkConfigMethod,
    NetworkInterfaceType, CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA,
    NETWORK_INTERFACE_ARRAY_SCHEMA, NETWORK_INTERFACE_LIST_SCHEMA, NETWORK_INTERFACE_NAME_SCHEMA,
    CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
    NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
    NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::network::{self, NetworkConfig};

@@ -18,41 +18,57 @@ use proxmox_rest_server::WorkerTask;

fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
    let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
    Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
    Ok(value
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap().to_string())
        .collect())
}

fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {

    let current_gateway_v4 = config.interfaces.iter()
    let current_gateway_v4 = config
        .interfaces
        .iter()
        .find(|(_, interface)| interface.gateway.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v4) = current_gateway_v4 {
        if current_gateway_v4 != iface {
            bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
            bail!(
                "Default IPv4 gateway already exists on interface '{}'",
                current_gateway_v4
            );
        }
    }
    Ok(())
}

fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {

    let current_gateway_v6 = config.interfaces.iter()
    let current_gateway_v6 = config
        .interfaces
        .iter()
        .find(|(_, interface)| interface.gateway6.is_some())
        .map(|(name, _)| name.to_string());

    if let Some(current_gateway_v6) = current_gateway_v6 {
        if current_gateway_v6 != iface {
            bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
            bail!(
                "Default IPv6 gateway already exists on interface '{}'",
                current_gateway_v6
            );
        }
    }
    Ok(())
}


fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Error> {
    if iface.interface_type != NetworkInterfaceType::Bridge {
        bail!("interface '{}' is no bridge (type is {:?})", iface.name, iface.interface_type);
        bail!(
            "interface '{}' is no bridge (type is {:?})",
            iface.name,
            iface.interface_type
        );
    }
    iface.bridge_ports = Some(ports);
    Ok(())
@@ -60,7 +76,11 @@ fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Err

fn set_bond_slaves(iface: &mut Interface, slaves: Vec<String>) -> Result<(), Error> {
    if iface.interface_type != NetworkInterfaceType::Bond {
        bail!("interface '{}' is no bond (type is {:?})", iface.name, iface.interface_type);
        bail!(
            "interface '{}' is no bond (type is {:?})",
            iface.name,
            iface.interface_type
        );
    }
    iface.slaves = Some(slaves);
    Ok(())
@@ -91,14 +111,15 @@ pub fn list_network_devices(
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, digest) = network::config()?;
    let digest = hex::encode(&digest);

    let mut list = Vec::new();

    for (iface, interface) in config.interfaces.iter() {
        if iface == "lo" { continue; } // do not list lo
        if iface == "lo" {
            continue;
        } // do not list lo
        let mut item: Value = to_value(interface)?;
        item["digest"] = digest.clone().into();
        item["iface"] = iface.to_string().into();
@@ -131,7 +152,6 @@ pub fn list_network_devices(
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {

    let (config, digest) = network::config()?;

    let interface = config.lookup(&iface)?;
@@ -142,7 +162,6 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
    Ok(data)
}


#[api(
    protected: true,
    input: {
@@ -256,7 +275,6 @@ pub fn create_interface(
    slaves: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let interface_type = pbs_tools::json::required_string_param(&param, "type")?;
    let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

@@ -271,35 +289,55 @@ pub fn create_interface(
    let mut interface = Interface::new(iface.clone());
    interface.interface_type = interface_type;

    if let Some(autostart) = autostart { interface.autostart = autostart; }
    if method.is_some() { interface.method = method; }
    if method6.is_some() { interface.method6 = method6; }
    if mtu.is_some() { interface.mtu = mtu; }
    if comments.is_some() { interface.comments = comments; }
    if comments6.is_some() { interface.comments6 = comments6; }
    if let Some(autostart) = autostart {
        interface.autostart = autostart;
    }
    if method.is_some() {
        interface.method = method;
    }
    if method6.is_some() {
        interface.method6 = method6;
    }
    if mtu.is_some() {
        interface.mtu = mtu;
    }
    if comments.is_some() {
        interface.comments = comments;
    }
    if comments6.is_some() {
        interface.comments6 = comments6;
    }

    if let Some(cidr) = cidr {
        let (_, _, is_v6) = network::parse_cidr(&cidr)?;
        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        interface.cidr = Some(cidr);
    }

    if let Some(cidr6) = cidr6 {
        let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        interface.cidr6 = Some(cidr6);
    }

    if let Some(gateway) = gateway {
        let is_v6 = gateway.contains(':');
        if is_v6 {  bail!("invalid address type (expected IPv4, got IPv6)"); }
        if is_v6 {
            bail!("invalid address type (expected IPv4, got IPv6)");
        }
        check_duplicate_gateway_v4(&config, &iface)?;
        interface.gateway = Some(gateway);
    }

    if let Some(gateway6) = gateway6 {
        let is_v6 = gateway6.contains(':');
        if !is_v6 {  bail!("invalid address type (expected IPv6, got IPv4)"); }
        if !is_v6 {
            bail!("invalid address type (expected IPv6, got IPv4)");
        }
        check_duplicate_gateway_v6(&config, &iface)?;
        interface.gateway6 = Some(gateway6);
    }
@@ -310,7 +348,9 @@ pub fn create_interface(
                let ports = split_interface_list(&ports)?;
                set_bridge_ports(&mut interface, ports)?;
            }
            if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
            if bridge_vlan_aware.is_some() {
                interface.bridge_vlan_aware = bridge_vlan_aware;
            }
        }
        NetworkInterfaceType::Bond => {
            if let Some(mode) = bond_mode {
@@ -322,9 +362,7 @@ pub fn create_interface(
                    interface.bond_primary = bond_primary;
                }
                if bond_xmit_hash_policy.is_some() {
                    if mode != LinuxBondMode::ieee802_3ad &&
                       mode != LinuxBondMode::balance_xor
                    {
                    if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
                        bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
                    }
                    interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
@@ -335,7 +373,10 @@ pub fn create_interface(
                set_bond_slaves(&mut interface, slaves)?;
            }
        }
        _ => bail!("creating network interface type '{:?}' is not supported", interface_type),
        _ => bail!(
            "creating network interface type '{:?}' is not supported",
            interface_type
        ),
    }

    if interface.cidr.is_some() || interface.gateway.is_some() {
@@ -395,7 +436,6 @@ pub enum DeletableProperty {
    bond_xmit_hash_policy,
}


#[api(
    protected: true,
    input: {
@@ -523,7 +563,6 @@ pub fn update_interface(
    digest: Option<String>,
    param: Value,
) -> Result<(), Error> {

    let _lock = network::lock_config()?;

    let (mut config, expected_digest) = network::config()?;
@@ -533,49 +572,95 @@ pub fn update_interface(
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
    if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }
    if gateway.is_some() {
        check_duplicate_gateway_v4(&config, &iface)?;
    }
    if gateway6.is_some() {
        check_duplicate_gateway_v6(&config, &iface)?;
    }

    let interface = config.lookup_mut(&iface)?;

    if let Some(interface_type) = param.get("type") {
        let interface_type = NetworkInterfaceType::deserialize(interface_type)?;
        if  interface_type != interface.interface_type {
            bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
        if interface_type != interface.interface_type {
            bail!(
                "got unexpected interface type ({:?} != {:?})",
                interface_type,
                interface.interface_type
            );
        }
    }

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::cidr => { interface.cidr = None; },
                DeletableProperty::cidr6 => { interface.cidr6 = None; },
                DeletableProperty::gateway => { interface.gateway = None; },
                DeletableProperty::gateway6 => { interface.gateway6 = None; },
                DeletableProperty::method => { interface.method = None; },
                DeletableProperty::method6 => { interface.method6 = None; },
                DeletableProperty::comments => { interface.comments = None; },
                DeletableProperty::comments6 => { interface.comments6 = None; },
                DeletableProperty::mtu => { interface.mtu = None; },
                DeletableProperty::autostart => { interface.autostart = false; },
                DeletableProperty::bridge_ports => { set_bridge_ports(interface, Vec::new())?; }
                DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
                DeletableProperty::slaves => { set_bond_slaves(interface, Vec::new())?; }
                DeletableProperty::bond_primary => { interface.bond_primary = None; }
                DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
                DeletableProperty::cidr => {
|                     interface.cidr = None; | ||||
|                 } | ||||
|                 DeletableProperty::cidr6 => { | ||||
|                     interface.cidr6 = None; | ||||
|                 } | ||||
|                 DeletableProperty::gateway => { | ||||
|                     interface.gateway = None; | ||||
|                 } | ||||
|                 DeletableProperty::gateway6 => { | ||||
|                     interface.gateway6 = None; | ||||
|                 } | ||||
|                 DeletableProperty::method => { | ||||
|                     interface.method = None; | ||||
|                 } | ||||
|                 DeletableProperty::method6 => { | ||||
|                     interface.method6 = None; | ||||
|                 } | ||||
|                 DeletableProperty::comments => { | ||||
|                     interface.comments = None; | ||||
|                 } | ||||
|                 DeletableProperty::comments6 => { | ||||
|                     interface.comments6 = None; | ||||
|                 } | ||||
|                 DeletableProperty::mtu => { | ||||
|                     interface.mtu = None; | ||||
|                 } | ||||
|                 DeletableProperty::autostart => { | ||||
|                     interface.autostart = false; | ||||
|                 } | ||||
|                 DeletableProperty::bridge_ports => { | ||||
|                     set_bridge_ports(interface, Vec::new())?; | ||||
|                 } | ||||
|                 DeletableProperty::bridge_vlan_aware => { | ||||
|                     interface.bridge_vlan_aware = None; | ||||
|                 } | ||||
|                 DeletableProperty::slaves => { | ||||
|                     set_bond_slaves(interface, Vec::new())?; | ||||
|                 } | ||||
|                 DeletableProperty::bond_primary => { | ||||
|                     interface.bond_primary = None; | ||||
|                 } | ||||
|                 DeletableProperty::bond_xmit_hash_policy => interface.bond_xmit_hash_policy = None, | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if let Some(autostart) = autostart { interface.autostart = autostart; } | ||||
|     if method.is_some() { interface.method = method; } | ||||
|     if method6.is_some() { interface.method6 = method6; } | ||||
|     if mtu.is_some() { interface.mtu = mtu; } | ||||
|     if let Some(autostart) = autostart { | ||||
|         interface.autostart = autostart; | ||||
|     } | ||||
|     if method.is_some() { | ||||
|         interface.method = method; | ||||
|     } | ||||
|     if method6.is_some() { | ||||
|         interface.method6 = method6; | ||||
|     } | ||||
|     if mtu.is_some() { | ||||
|         interface.mtu = mtu; | ||||
|     } | ||||
|     if let Some(ports) = bridge_ports { | ||||
|         let ports = split_interface_list(&ports)?; | ||||
|         set_bridge_ports(interface, ports)?; | ||||
|     } | ||||
|     if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; } | ||||
|     if bridge_vlan_aware.is_some() { | ||||
|         interface.bridge_vlan_aware = bridge_vlan_aware; | ||||
|     } | ||||
|     if let Some(slaves) = slaves { | ||||
|         let slaves = split_interface_list(&slaves)?; | ||||
|         set_bond_slaves(interface, slaves)?; | ||||
| @ -589,9 +674,7 @@ pub fn update_interface( | ||||
|             interface.bond_primary = bond_primary; | ||||
|         } | ||||
|         if bond_xmit_hash_policy.is_some() { | ||||
|             if mode != LinuxBondMode::ieee802_3ad && | ||||
|                mode != LinuxBondMode::balance_xor | ||||
|             { | ||||
|             if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor { | ||||
|                 bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode"); | ||||
|             } | ||||
|             interface.bond_xmit_hash_policy = bond_xmit_hash_policy; | ||||
| @ -600,30 +683,42 @@ pub fn update_interface( | ||||
|  | ||||
|     if let Some(cidr) = cidr { | ||||
|         let (_, _, is_v6) = network::parse_cidr(&cidr)?; | ||||
|         if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); } | ||||
|         if is_v6 { | ||||
|             bail!("invalid address type (expected IPv4, got IPv6)"); | ||||
|         } | ||||
|         interface.cidr = Some(cidr); | ||||
|     } | ||||
|  | ||||
|     if let Some(cidr6) = cidr6 { | ||||
|         let (_, _, is_v6) = network::parse_cidr(&cidr6)?; | ||||
|         if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); } | ||||
|         if !is_v6 { | ||||
|             bail!("invalid address type (expected IPv6, got IPv4)"); | ||||
|         } | ||||
|         interface.cidr6 = Some(cidr6); | ||||
|     } | ||||
|  | ||||
|     if let Some(gateway) = gateway { | ||||
|         let is_v6 = gateway.contains(':'); | ||||
|         if is_v6 {  bail!("invalid address type (expected IPv4, got IPv6)"); } | ||||
|         if is_v6 { | ||||
|             bail!("invalid address type (expected IPv4, got IPv6)"); | ||||
|         } | ||||
|         interface.gateway = Some(gateway); | ||||
|     } | ||||
|  | ||||
|     if let Some(gateway6) = gateway6 { | ||||
|         let is_v6 = gateway6.contains(':'); | ||||
|         if !is_v6 {  bail!("invalid address type (expected IPv6, got IPv4)"); } | ||||
|         if !is_v6 { | ||||
|             bail!("invalid address type (expected IPv6, got IPv4)"); | ||||
|         } | ||||
|         interface.gateway6 = Some(gateway6); | ||||
|     } | ||||
|  | ||||
|     if comments.is_some() { interface.comments = comments; } | ||||
|     if comments6.is_some() { interface.comments6 = comments6; } | ||||
|     if comments.is_some() { | ||||
|         interface.comments = comments; | ||||
|     } | ||||
|     if comments6.is_some() { | ||||
|         interface.comments6 = comments6; | ||||
|     } | ||||
|  | ||||
|     if interface.cidr.is_some() || interface.gateway.is_some() { | ||||
|         interface.method = Some(NetworkConfigMethod::Static); | ||||
| @ -696,21 +791,26 @@ pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Err | ||||
|     }, | ||||
| )] | ||||
| /// Reload network configuration (requires ifupdown2). | ||||
| pub async fn reload_network_config( | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<String, Error> { | ||||
|  | ||||
| pub async fn reload_network_config(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> { | ||||
|     network::assert_ifupdown2_installed()?; | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id.to_string(), true, |_worker| async { | ||||
|     let upid_str = WorkerTask::spawn( | ||||
|         "srvreload", | ||||
|         Some(String::from("networking")), | ||||
|         auth_id.to_string(), | ||||
|         true, | ||||
|         |_worker| async { | ||||
|             let _ = std::fs::rename( | ||||
|                 network::NETWORK_INTERFACES_NEW_FILENAME, | ||||
|                 network::NETWORK_INTERFACES_FILENAME, | ||||
|             ); | ||||
|  | ||||
|         let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME); | ||||
|  | ||||
|         network::network_reload()?; | ||||
|         Ok(()) | ||||
|     })?; | ||||
|             network::network_reload()?; | ||||
|             Ok(()) | ||||
|         }, | ||||
|     )?; | ||||
|  | ||||
|     Ok(upid_str) | ||||
| } | ||||
| @ -730,7 +830,6 @@ pub async fn reload_network_config( | ||||
| )] | ||||
| /// Revert network configuration (rm /etc/network/interfaces.new). | ||||
| pub fn revert_network_config() -> Result<(), Error> { | ||||
|  | ||||
|     let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME); | ||||
|  | ||||
|     Ok(()) | ||||
|  | ||||
| @ -33,5 +33,4 @@ fn get_report( | ||||
|     Ok(json!(generate_report())) | ||||
| } | ||||
|  | ||||
| pub const ROUTER: Router = Router::new() | ||||
|     .get(&API_METHOD_GET_REPORT); | ||||
| pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_REPORT); | ||||
|  | ||||
| @ -1,13 +1,11 @@ | ||||
| use anyhow::{bail, Error}; | ||||
| use serde_json::{Value, json}; | ||||
| use serde_json::{json, Value}; | ||||
| use std::collections::BTreeMap; | ||||
|  | ||||
| use proxmox_router::{Permission, Router}; | ||||
| use proxmox_schema::api; | ||||
|  | ||||
| use pbs_api_types::{ | ||||
|     NODE_SCHEMA, RRDMode, RRDTimeFrame, PRIV_SYS_AUDIT, | ||||
| }; | ||||
| use pbs_api_types::{RRDMode, RRDTimeFrame, NODE_SCHEMA, PRIV_SYS_AUDIT}; | ||||
|  | ||||
| use crate::rrd_cache::extract_rrd_data; | ||||
|  | ||||
| @ -17,7 +15,6 @@ pub fn create_value_from_rrd( | ||||
|     timeframe: RRDTimeFrame, | ||||
|     mode: RRDMode, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
|     let mut result: Vec<Value> = Vec::new(); | ||||
|  | ||||
|     let mut timemap = BTreeMap::new(); | ||||
| @ -30,9 +27,13 @@ pub fn create_value_from_rrd( | ||||
|             None => continue, | ||||
|         }; | ||||
|  | ||||
|         if let Some(expected_resolution) = last_resolution  { | ||||
|         if let Some(expected_resolution) = last_resolution { | ||||
|             if reso != expected_resolution { | ||||
|                 bail!("got unexpected RRD resolution ({} != {})", reso, expected_resolution); | ||||
|                 bail!( | ||||
|                     "got unexpected RRD resolution ({} != {})", | ||||
|                     reso, | ||||
|                     expected_resolution | ||||
|                 ); | ||||
|             } | ||||
|         } else { | ||||
|             last_resolution = Some(reso); | ||||
| @ -75,29 +76,30 @@ pub fn create_value_from_rrd( | ||||
|     }, | ||||
| )] | ||||
| /// Read node stats | ||||
| fn get_node_stats( | ||||
|     timeframe: RRDTimeFrame, | ||||
|     cf: RRDMode, | ||||
|     _param: Value, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| fn get_node_stats(timeframe: RRDTimeFrame, cf: RRDMode, _param: Value) -> Result<Value, Error> { | ||||
|     create_value_from_rrd( | ||||
|         "host", | ||||
|         &[ | ||||
|             "cpu", "iowait", | ||||
|             "memtotal", "memused", | ||||
|             "swaptotal", "swapused", | ||||
|             "netin", "netout", | ||||
|             "cpu", | ||||
|             "iowait", | ||||
|             "memtotal", | ||||
|             "memused", | ||||
|             "swaptotal", | ||||
|             "swapused", | ||||
|             "netin", | ||||
|             "netout", | ||||
|             "loadavg", | ||||
|             "total", "used", | ||||
|             "read_ios", "read_bytes", | ||||
|             "write_ios", "write_bytes", | ||||
|             "total", | ||||
|             "used", | ||||
|             "read_ios", | ||||
|             "read_bytes", | ||||
|             "write_ios", | ||||
|             "write_bytes", | ||||
|             "io_ticks", | ||||
|          ], | ||||
|         ], | ||||
|         timeframe, | ||||
|         cf, | ||||
|     ) | ||||
| } | ||||
|  | ||||
| pub const ROUTER: Router = Router::new() | ||||
|     .get(&API_METHOD_GET_NODE_STATS); | ||||
| pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_NODE_STATS); | ||||
|  | ||||
| @ -3,11 +3,11 @@ use std::process::{Command, Stdio}; | ||||
| use anyhow::{bail, Error}; | ||||
| use serde_json::{json, Value}; | ||||
|  | ||||
| use proxmox_sys::sortable; | ||||
| use proxmox_router::{list_subdirs_api_method, Router, Permission, RpcEnvironment, SubdirMap}; | ||||
| use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap}; | ||||
| use proxmox_schema::api; | ||||
| use proxmox_sys::sortable; | ||||
|  | ||||
| use pbs_api_types::{Authid, NODE_SCHEMA, SERVICE_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; | ||||
| use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SERVICE_ID_SCHEMA}; | ||||
|  | ||||
| use proxmox_rest_server::WorkerTask; | ||||
|  | ||||
| @ -22,7 +22,6 @@ static SERVICE_NAME_LIST: [&str; 7] = [ | ||||
| ]; | ||||
|  | ||||
| pub fn real_service_name(service: &str) -> &str { | ||||
|  | ||||
|     // since postfix package 3.1.0-3.1 the postfix unit is only here | ||||
|     // to manage subinstances, of which the default is called "-". | ||||
|     // This is where we look for the daemon status | ||||
| @ -35,7 +34,6 @@ pub fn real_service_name(service: &str) -> &str { | ||||
| } | ||||
|  | ||||
| fn get_full_service_state(service: &str) -> Result<Value, Error> { | ||||
|  | ||||
|     let real_service_name = real_service_name(service); | ||||
|  | ||||
|     let mut child = Command::new("systemctl") | ||||
| @ -43,7 +41,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> { | ||||
|         .stdout(Stdio::piped()) | ||||
|         .spawn()?; | ||||
|  | ||||
|     use std::io::{BufRead,BufReader}; | ||||
|     use std::io::{BufRead, BufReader}; | ||||
|  | ||||
|     let mut result = json!({}); | ||||
|  | ||||
| @ -76,7 +74,6 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> { | ||||
| } | ||||
|  | ||||
| fn json_service_state(service: &str, status: Value) -> Value { | ||||
|  | ||||
|     if let Some(desc) = status["Description"].as_str() { | ||||
|         let name = status["Name"].as_str().unwrap_or(service); | ||||
|         let state = status["SubState"].as_str().unwrap_or("unknown"); | ||||
| @ -128,10 +125,7 @@ fn json_service_state(service: &str, status: Value) -> Value { | ||||
|     }, | ||||
| )] | ||||
| /// Service list. | ||||
| fn list_services( | ||||
|     _param: Value, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| fn list_services(_param: Value) -> Result<Value, Error> { | ||||
|     let mut list = vec![]; | ||||
|  | ||||
|     for service in &SERVICE_NAME_LIST { | ||||
| @ -165,11 +159,7 @@ fn list_services( | ||||
|     }, | ||||
| )] | ||||
| /// Read service properties. | ||||
| fn get_service_state( | ||||
|     service: String, | ||||
|     _param: Value, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| fn get_service_state(service: String, _param: Value) -> Result<Value, Error> { | ||||
|     let service = service.as_str(); | ||||
|  | ||||
|     if !SERVICE_NAME_LIST.contains(&service) { | ||||
| @ -182,11 +172,10 @@ fn get_service_state( | ||||
| } | ||||
|  | ||||
| fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> { | ||||
|  | ||||
|     let workerid = format!("srv{}", &cmd); | ||||
|  | ||||
|     let cmd = match cmd { | ||||
|         "start"|"stop"|"restart"=> cmd.to_string(), | ||||
|         "start" | "stop" | "restart" => cmd.to_string(), | ||||
|         "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload | ||||
|         _ => bail!("unknown service command '{}'", cmd), | ||||
|     }; | ||||
| @ -198,9 +187,12 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu | ||||
|         auth_id.to_string(), | ||||
|         false, | ||||
|         move |_worker| { | ||||
|  | ||||
|             if service == "proxmox-backup" && cmd == "stop" { | ||||
|                 bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd); | ||||
|                 bail!( | ||||
|                     "invalid service cmd '{} {}' cannot stop essential service!", | ||||
|                     service, | ||||
|                     cmd | ||||
|                 ); | ||||
|             } | ||||
|  | ||||
|             let real_service_name = real_service_name(&service); | ||||
| @ -214,7 +206,7 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu | ||||
|             } | ||||
|  | ||||
|             Ok(()) | ||||
|         } | ||||
|         }, | ||||
|     )?; | ||||
|  | ||||
|     Ok(upid.into()) | ||||
| @ -242,7 +234,6 @@ fn start_service( | ||||
|     _param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     log::info!("starting service {}", service); | ||||
| @ -271,8 +262,7 @@ fn stop_service( | ||||
|     service: String, | ||||
|     _param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
|  ) -> Result<Value, Error> { | ||||
|  | ||||
| ) -> Result<Value, Error> { | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     log::info!("stopping service {}", service); | ||||
| @ -302,7 +292,6 @@ fn restart_service( | ||||
|     _param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     log::info!("re-starting service {}", service); | ||||
| @ -337,7 +326,6 @@ fn reload_service( | ||||
|     _param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|  | ||||
|     log::info!("reloading service {}", service); | ||||
| @ -347,26 +335,11 @@ fn reload_service( | ||||
|  | ||||
| #[sortable] | ||||
| const SERVICE_SUBDIRS: SubdirMap = &sorted!([ | ||||
|     ( | ||||
|         "reload", &Router::new() | ||||
|             .post(&API_METHOD_RELOAD_SERVICE) | ||||
|     ), | ||||
|     ( | ||||
|         "restart", &Router::new() | ||||
|             .post(&API_METHOD_RESTART_SERVICE) | ||||
|     ), | ||||
|     ( | ||||
|         "start", &Router::new() | ||||
|             .post(&API_METHOD_START_SERVICE) | ||||
|     ), | ||||
|     ( | ||||
|         "state", &Router::new() | ||||
|             .get(&API_METHOD_GET_SERVICE_STATE) | ||||
|     ), | ||||
|     ( | ||||
|         "stop", &Router::new() | ||||
|             .post(&API_METHOD_STOP_SERVICE) | ||||
|     ), | ||||
|     ("reload", &Router::new().post(&API_METHOD_RELOAD_SERVICE)), | ||||
|     ("restart", &Router::new().post(&API_METHOD_RESTART_SERVICE)), | ||||
|     ("start", &Router::new().post(&API_METHOD_START_SERVICE)), | ||||
|     ("state", &Router::new().get(&API_METHOD_GET_SERVICE_STATE)), | ||||
|     ("stop", &Router::new().post(&API_METHOD_STOP_SERVICE)), | ||||
| ]); | ||||
|  | ||||
| const SERVICE_ROUTER: Router = Router::new() | ||||
|  | ||||
| @ -1,18 +1,18 @@ | ||||
| use std::process::Command; | ||||
| use std::path::Path; | ||||
| use std::process::Command; | ||||
|  | ||||
| use anyhow::{Error, format_err, bail}; | ||||
| use anyhow::{bail, format_err, Error}; | ||||
| use serde_json::Value; | ||||
|  | ||||
| use proxmox_sys::linux::procfs; | ||||
|  | ||||
| use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission}; | ||||
| use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment}; | ||||
| use proxmox_schema::api; | ||||
|  | ||||
| use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT}; | ||||
| use pbs_api_types::{NodePowerCommand, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT}; | ||||
|  | ||||
| use crate::api2::types::{ | ||||
|     NodeCpuInformation, NodeStatus, NodeMemoryCounters, NodeSwapCounters, NodeInformation, | ||||
|     NodeCpuInformation, NodeInformation, NodeMemoryCounters, NodeStatus, NodeSwapCounters, | ||||
| }; | ||||
|  | ||||
| impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation { | ||||
| @ -111,7 +111,6 @@ fn get_status( | ||||
| )] | ||||
| /// Reboot or shutdown the node. | ||||
| fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> { | ||||
|  | ||||
|     let systemctl_command = match command { | ||||
|         NodePowerCommand::Reboot => "reboot", | ||||
|         NodePowerCommand::Shutdown => "poweroff", | ||||
| @ -126,7 +125,13 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> { | ||||
|         match output.status.code() { | ||||
|             Some(code) => { | ||||
|                 let msg = String::from_utf8(output.stderr) | ||||
|                     .map(|m| if m.is_empty() { String::from("no error message") } else { m }) | ||||
|                     .map(|m| { | ||||
|                         if m.is_empty() { | ||||
|                             String::from("no error message") | ||||
|                         } else { | ||||
|                             m | ||||
|                         } | ||||
|                     }) | ||||
|                     .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)")); | ||||
|                 bail!("diff failed with status code: {} - {}", code, msg); | ||||
|             } | ||||
|  | ||||
| @ -1,16 +1,15 @@ | ||||
| use anyhow::{Error, format_err, bail}; | ||||
| use anyhow::{bail, format_err, Error}; | ||||
| use serde_json::Value; | ||||
|  | ||||
| use proxmox_router::{Router, RpcEnvironment, Permission}; | ||||
| use proxmox_router::{Permission, Router, RpcEnvironment}; | ||||
| use proxmox_schema::api; | ||||
|  | ||||
| use pbs_api_types::{ | ||||
|     NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid, | ||||
|     PRIV_SYS_AUDIT,PRIV_SYS_MODIFY, | ||||
|     Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SUBSCRIPTION_KEY_SCHEMA, | ||||
| }; | ||||
|  | ||||
| use crate::tools; | ||||
| use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo}; | ||||
| use crate::tools::subscription::{self, SubscriptionInfo, SubscriptionStatus}; | ||||
| use pbs_config::CachedUserInfo; | ||||
|  | ||||
| #[api( | ||||
| @ -33,9 +32,7 @@ use pbs_config::CachedUserInfo; | ||||
|     }, | ||||
| )] | ||||
| /// Check and update subscription status. | ||||
| pub fn check_subscription( | ||||
|     force: bool, | ||||
| ) -> Result<(), Error> { | ||||
| pub fn check_subscription(force: bool) -> Result<(), Error> { | ||||
|     let info = match subscription::read_subscription() { | ||||
|         Err(err) => bail!("could not read subscription status: {}", err), | ||||
|         Ok(Some(info)) => info, | ||||
| @ -93,7 +90,7 @@ pub fn get_subscription( | ||||
|             status: SubscriptionStatus::NOTFOUND, | ||||
|             message: Some("There is no subscription key".into()), | ||||
|             serverid: Some(tools::get_hardware_address()?), | ||||
|             url:  Some(url.into()), | ||||
|             url: Some(url.into()), | ||||
|             ..Default::default() | ||||
|         }, | ||||
|     }; | ||||
| @ -132,10 +129,7 @@ pub fn get_subscription( | ||||
|     }, | ||||
| )] | ||||
| /// Set a subscription key and check it. | ||||
| pub fn set_subscription( | ||||
|     key: String, | ||||
| ) -> Result<(), Error> { | ||||
|  | ||||
| pub fn set_subscription(key: String) -> Result<(), Error> { | ||||
|     let server_id = tools::get_hardware_address()?; | ||||
|  | ||||
|     let info = subscription::check_subscription(key, server_id)?; | ||||
| @ -161,7 +155,6 @@ pub fn set_subscription( | ||||
| )] | ||||
| /// Delete subscription info. | ||||
| pub fn delete_subscription() -> Result<(), Error> { | ||||
|  | ||||
|     subscription::delete_subscription() | ||||
|         .map_err(|err| format_err!("Deleting subscription failed: {}", err))?; | ||||
|  | ||||
|  | ||||
| @ -1,12 +1,12 @@ | ||||
| use std::process::{Command, Stdio}; | ||||
|  | ||||
| use anyhow::{Error}; | ||||
| use anyhow::Error; | ||||
| use serde_json::{json, Value}; | ||||
|  | ||||
| use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission}; | ||||
| use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment}; | ||||
| use proxmox_schema::api; | ||||
|  | ||||
| use pbs_api_types::{NODE_SCHEMA, SYSTEMD_DATETIME_FORMAT, PRIV_SYS_AUDIT}; | ||||
| use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT, SYSTEMD_DATETIME_FORMAT}; | ||||
|  | ||||
| fn dump_journal( | ||||
|     start: Option<u64>, | ||||
| @ -15,12 +15,17 @@ fn dump_journal( | ||||
|     until: Option<&str>, | ||||
|     service: Option<&str>, | ||||
| ) -> Result<(u64, Vec<Value>), Error> { | ||||
|  | ||||
|     let mut args = vec!["-o", "short", "--no-pager"]; | ||||
|  | ||||
|     if let Some(service) = service { args.extend(&["--unit", service]); } | ||||
|     if let Some(since) = since { args.extend(&["--since", since]); } | ||||
|     if let Some(until) = until { args.extend(&["--until", until]); } | ||||
|     if let Some(service) = service { | ||||
|         args.extend(&["--unit", service]); | ||||
|     } | ||||
|     if let Some(since) = since { | ||||
|         args.extend(&["--since", since]); | ||||
|     } | ||||
|     if let Some(until) = until { | ||||
|         args.extend(&["--until", until]); | ||||
|     } | ||||
|  | ||||
|     let mut lines: Vec<Value> = vec![]; | ||||
|     let mut limit = limit.unwrap_or(50); | ||||
| @ -32,15 +37,19 @@ fn dump_journal( | ||||
|         .stdout(Stdio::piped()) | ||||
|         .spawn()?; | ||||
|  | ||||
|     use std::io::{BufRead,BufReader}; | ||||
|     use std::io::{BufRead, BufReader}; | ||||
|  | ||||
|     if let Some(ref mut stdout) = child.stdout { | ||||
|         for line in BufReader::new(stdout).lines() { | ||||
|             match line { | ||||
|                 Ok(line) => { | ||||
|                     count += 1; | ||||
|                     if count < start { continue }; | ||||
| 	            if limit == 0 { continue }; | ||||
|                     if count < start { | ||||
|                         continue; | ||||
|                     }; | ||||
|                     if limit == 0 { | ||||
|                         continue; | ||||
|                     }; | ||||
|  | ||||
|                     lines.push(json!({ "n": count, "t": line })); | ||||
|  | ||||
| @ -64,7 +73,7 @@ fn dump_journal( | ||||
|     // so we add a line | ||||
|     if count == 0 { | ||||
|         count += 1; | ||||
| 	lines.push(json!({ "n": count, "t": "no content"})); | ||||
|         lines.push(json!({ "n": count, "t": "no content"})); | ||||
|     } | ||||
|  | ||||
|     Ok((count, lines)) | ||||
| @ -133,21 +142,21 @@ fn get_syslog( | ||||
|     _info: &ApiMethod, | ||||
|     mut rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
|     let service = param["service"].as_str().map(|service| crate::api2::node::services::real_service_name(service)); | ||||
|     let service = param["service"] | ||||
|         .as_str() | ||||
|         .map(|service| crate::api2::node::services::real_service_name(service)); | ||||
|  | ||||
|     let (count, lines) = dump_journal( | ||||
|         param["start"].as_u64(), | ||||
|         param["limit"].as_u64(), | ||||
|         param["since"].as_str(), | ||||
|         param["until"].as_str(), | ||||
|         service)?; | ||||
|         service, | ||||
|     )?; | ||||
|  | ||||
|     rpcenv["total"] = Value::from(count); | ||||
|  | ||||
|     Ok(json!(lines)) | ||||
| } | ||||
|  | ||||
| pub const ROUTER: Router = Router::new() | ||||
|     .get(&API_METHOD_GET_SYSLOG); | ||||
|  | ||||
| pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_SYSLOG); | ||||
|  | ||||
| @ -4,21 +4,20 @@ use std::io::{BufRead, BufReader}; | ||||
| use anyhow::{bail, Error}; | ||||
| use serde_json::{json, Value}; | ||||
|  | ||||
| use proxmox_sys::sortable; | ||||
| use proxmox_router::{list_subdirs_api_method, Router, RpcEnvironment, Permission, SubdirMap}; | ||||
| use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap}; | ||||
| use proxmox_schema::api; | ||||
| use proxmox_sys::sortable; | ||||
|  | ||||
| use pbs_api_types::{ | ||||
|     Userid, Authid, Tokenname, TaskListItem, TaskStateType, UPID, | ||||
|     NODE_SCHEMA, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX, | ||||
|     SYNC_JOB_WORKER_ID_REGEX, DATASTORE_SCHEMA, | ||||
|     Authid, TaskListItem, TaskStateType, Tokenname, Userid, DATASTORE_SCHEMA, NODE_SCHEMA, | ||||
|     PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_VERIFY, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, | ||||
|     SYNC_JOB_WORKER_ID_REGEX, UPID, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX, | ||||
| }; | ||||
|  | ||||
| use crate::api2::pull::check_pull_privs; | ||||
|  | ||||
| use proxmox_rest_server::{upid_log_path, upid_read_status, TaskState, TaskListInfoIterator}; | ||||
| use pbs_config::CachedUserInfo; | ||||
| use proxmox_rest_server::{upid_log_path, upid_read_status, TaskListInfoIterator, TaskState}; | ||||
|  | ||||
| // matches respective job execution privileges | ||||
| fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> { | ||||
| @ -26,13 +25,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> | ||||
|         ("verificationjob", Some(workerid)) => { | ||||
|             if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) { | ||||
|                 if let Some(store) = captures.get(1) { | ||||
|                     return user_info.check_privs(auth_id, | ||||
|                                                  &["datastore", store.as_str()], | ||||
|                                                  PRIV_DATASTORE_VERIFY, | ||||
|                                                  true); | ||||
|                     return user_info.check_privs( | ||||
|                         auth_id, | ||||
|                         &["datastore", store.as_str()], | ||||
|                         PRIV_DATASTORE_VERIFY, | ||||
|                         true, | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         }, | ||||
|         } | ||||
|         ("syncjob", Some(workerid)) => { | ||||
|             if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) { | ||||
|                 let remote = captures.get(1); | ||||
| @ -40,29 +41,34 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> | ||||
|                 let local_store = captures.get(3); | ||||
|  | ||||
|                 if let (Some(remote), Some(remote_store), Some(local_store)) = | ||||
|                     (remote, remote_store, local_store) { | ||||
|  | ||||
|                     return check_pull_privs(auth_id, | ||||
|                                             local_store.as_str(), | ||||
|                                             remote.as_str(), | ||||
|                                             remote_store.as_str(), | ||||
|                                             false); | ||||
|                     (remote, remote_store, local_store) | ||||
|                 { | ||||
|                     return check_pull_privs( | ||||
|                         auth_id, | ||||
|                         local_store.as_str(), | ||||
|                         remote.as_str(), | ||||
|                         remote_store.as_str(), | ||||
|                         false, | ||||
|                     ); | ||||
|                 } | ||||
|             } | ||||
|         }, | ||||
|         } | ||||
|         ("garbage_collection", Some(workerid)) => { | ||||
|             return user_info.check_privs(auth_id, | ||||
|                                          &["datastore", workerid], | ||||
|                                          PRIV_DATASTORE_MODIFY, | ||||
|                                          true) | ||||
|         }, | ||||
|             return user_info.check_privs( | ||||
|                 auth_id, | ||||
|                 &["datastore", workerid], | ||||
|                 PRIV_DATASTORE_MODIFY, | ||||
|                 true, | ||||
|             ) | ||||
|         } | ||||
|         ("prune", Some(workerid)) => { | ||||
|             return user_info.check_privs(auth_id, | ||||
|                                          &["datastore", | ||||
|                                          workerid], | ||||
|                                          PRIV_DATASTORE_MODIFY, | ||||
|                                          true); | ||||
|         }, | ||||
|             return user_info.check_privs( | ||||
|                 auth_id, | ||||
|                 &["datastore", workerid], | ||||
|                 PRIV_DATASTORE_MODIFY, | ||||
|                 true, | ||||
|             ); | ||||
|         } | ||||
|         _ => bail!("not a scheduled job task"), | ||||
|     }; | ||||
|  | ||||
| @ -102,7 +108,8 @@ fn check_job_store(upid: &UPID, store: &str) -> bool { | ||||
| fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> { | ||||
|     let task_auth_id: Authid = upid.auth_id.parse()?; | ||||
|     if auth_id == &task_auth_id | ||||
|         || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) { | ||||
|         || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) | ||||
|     { | ||||
|         // task owner can always read | ||||
|         Ok(()) | ||||
|     } else { | ||||
| @ -111,7 +118,8 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> { | ||||
|         // access to all tasks | ||||
|         // or task == job which the user/token could have configured/manually executed | ||||
|  | ||||
|         user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false) | ||||
|         user_info | ||||
|             .check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false) | ||||
|             .or_else(|_| check_job_privs(auth_id, &user_info, upid)) | ||||
|             .or_else(|_| bail!("task access not allowed")) | ||||
|     } | ||||
| @ -127,9 +135,10 @@ pub fn tasktype(state: &TaskState) -> TaskStateType { | ||||
| } | ||||
|  | ||||
| fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types::TaskListItem { | ||||
|     let (endtime, status) = info | ||||
|         .state | ||||
|         .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string()))); | ||||
|     let (endtime, status) = info.state.map_or_else( | ||||
|         || (None, None), | ||||
|         |a| (Some(a.endtime()), Some(a.to_string())), | ||||
|     ); | ||||
|  | ||||
|     pbs_api_types::TaskListItem { | ||||
|         upid: info.upid_str, | ||||
| @ -210,11 +219,7 @@ fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types | ||||
|     }, | ||||
| )] | ||||
| /// Get task status. | ||||
| async fn get_task_status( | ||||
|     param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| async fn get_task_status(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> { | ||||
|     let upid = extract_upid(¶m)?; | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
| @ -249,7 +254,6 @@ async fn get_task_status( | ||||
| } | ||||
|  | ||||
| fn extract_upid(param: &Value) -> Result<UPID, Error> { | ||||
|  | ||||
|     let upid_str = pbs_tools::json::required_string_param(param, "upid")?; | ||||
|  | ||||
|     upid_str.parse::<UPID>() | ||||
| @ -289,11 +293,7 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> { | ||||
|     }, | ||||
| )] | ||||
| /// Read task log. | ||||
| async fn read_task_log( | ||||
|     param: Value, | ||||
|     mut rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| async fn read_task_log(param: Value, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> { | ||||
|     let upid = extract_upid(¶m)?; | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
| @ -317,8 +317,12 @@ async fn read_task_log( | ||||
|         match line { | ||||
|             Ok(line) => { | ||||
|                 count += 1; | ||||
|                 if count < start { continue }; | ||||
| 	        if limit == 0 { continue }; | ||||
|                 if count < start { | ||||
|                     continue; | ||||
|                 }; | ||||
|                 if limit == 0 { | ||||
|                     continue; | ||||
|                 }; | ||||
|  | ||||
|                 lines.push(json!({ "n": count, "t": line })); | ||||
|  | ||||
| @ -359,11 +363,7 @@ async fn read_task_log( | ||||
|     }, | ||||
| )] | ||||
| /// Try to stop a task. | ||||
| fn stop_task( | ||||
|     param: Value, | ||||
|     rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Value, Error> { | ||||
|  | ||||
| fn stop_task(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> { | ||||
|     let upid = extract_upid(¶m)?; | ||||
|  | ||||
|     let auth_id = rpcenv.get_auth_id().unwrap(); | ||||
| @ -465,7 +465,6 @@ pub fn list_tasks( | ||||
|     param: Value, | ||||
|     mut rpcenv: &mut dyn RpcEnvironment, | ||||
| ) -> Result<Vec<TaskListItem>, Error> { | ||||
|  | ||||
|     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | ||||
|     let user_info = CachedUserInfo::new()?; | ||||
|     let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]); | ||||
| @ -475,7 +474,11 @@ pub fn list_tasks( | ||||
|     let store = param["store"].as_str(); | ||||
|  | ||||
|     let list = TaskListInfoIterator::new(running)?; | ||||
|     let limit = if limit > 0 { limit as usize } else { usize::MAX }; | ||||
|     let limit = if limit > 0 { | ||||
|         limit as usize | ||||
|     } else { | ||||
|         usize::MAX | ||||
|     }; | ||||
|  | ||||
|     let mut skipped = 0; | ||||
|     let mut result: Vec<TaskListItem> = Vec::new(); | ||||
| @ -510,15 +513,21 @@ pub fn list_tasks( | ||||
|         } | ||||
|  | ||||
|         if let Some(needle) = &userfilter { | ||||
|             if !info.upid.auth_id.to_string().contains(needle) { continue; } | ||||
|             if !info.upid.auth_id.to_string().contains(needle) { | ||||
|                 continue; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         if let Some(store) = store { | ||||
|             if !check_job_store(&info.upid, store) { continue; } | ||||
|             if !check_job_store(&info.upid, store) { | ||||
|                 continue; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         if let Some(typefilter) = &typefilter { | ||||
|             if !info.upid.worker_type.contains(typefilter) { continue; } | ||||
|             if !info.upid.worker_type.contains(typefilter) { | ||||
|                 continue; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         match (&info.state, &statusfilter) { | ||||
| @ -528,9 +537,9 @@ pub fn list_tasks( | ||||
|                 if !filters.contains(&tasktype(state)) { | ||||
|                     continue; | ||||
|                 } | ||||
|             }, | ||||
|             } | ||||
|             (None, Some(_)) => continue, | ||||
|             _ => {}, | ||||
|             _ => {} | ||||
|         } | ||||
|  | ||||
|         if skipped < start as usize { | ||||
| @ -546,7 +555,8 @@ pub fn list_tasks( | ||||
|     } | ||||
|  | ||||
|     let mut count = result.len() + start as usize; | ||||
|     if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new | ||||
|     if !result.is_empty() && result.len() >= limit { | ||||
|         // we have a 'virtual' entry as long as we have any new | ||||
|         count += 1; | ||||
|     } | ||||
|  | ||||
| @ -557,14 +567,8 @@ pub fn list_tasks( | ||||
|  | ||||
| #[sortable] | ||||
| const UPID_API_SUBDIRS: SubdirMap = &sorted!([ | ||||
|     ( | ||||
|         "log", &Router::new() | ||||
|             .get(&API_METHOD_READ_TASK_LOG) | ||||
|     ), | ||||
|     ( | ||||
|         "status", &Router::new() | ||||
|             .get(&API_METHOD_GET_TASK_STATUS) | ||||
|     ) | ||||
|     ("log", &Router::new().get(&API_METHOD_READ_TASK_LOG)), | ||||
|     ("status", &Router::new().get(&API_METHOD_GET_TASK_STATUS)) | ||||
| ]); | ||||
|  | ||||
| pub const UPID_API_ROUTER: Router = Router::new() | ||||
|  | ||||
| @ -1,11 +1,11 @@ | ||||
| use anyhow::{bail, format_err, Error}; | ||||
| use serde_json::{json, Value}; | ||||
|  | ||||
| use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions}; | ||||
| use proxmox_router::{Router, Permission}; | ||||
| use proxmox_router::{Permission, Router}; | ||||
| use proxmox_schema::api; | ||||
| use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions}; | ||||
|  | ||||
| use pbs_api_types::{NODE_SCHEMA, TIME_ZONE_SCHEMA, PRIV_SYS_MODIFY}; | ||||
| use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY, TIME_ZONE_SCHEMA}; | ||||
|  | ||||
| fn read_etc_localtime() -> Result<String, Error> { | ||||
|     // use /etc/timezone | ||||
| @ -14,8 +14,8 @@ fn read_etc_localtime() -> Result<String, Error> { | ||||
|     } | ||||
|  | ||||
|     // otherwise guess from the /etc/localtime symlink | ||||
|     let link = std::fs::read_link("/etc/localtime"). | ||||
|         map_err(|err| format_err!("failed to guess timezone - {}", err))?; | ||||
|     let link = std::fs::read_link("/etc/localtime") | ||||
|         .map_err(|err| format_err!("failed to guess timezone - {}", err))?; | ||||
|  | ||||
|     let link = link.to_string_lossy(); | ||||
|     match link.rfind("/zoneinfo/") { | ||||
| @ -87,17 +87,19 @@ fn get_time(_param: Value) -> Result<Value, Error> { | ||||
|     }, | ||||
| )] | ||||
| /// Set time zone | ||||
| fn set_timezone( | ||||
|     timezone: String, | ||||
|     _param: Value, | ||||
| ) -> Result<Value, Error> { | ||||
| fn set_timezone(timezone: String, _param: Value) -> Result<Value, Error> { | ||||
|     let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone)); | ||||
|  | ||||
|     if !path.exists() { | ||||
|         bail!("No such timezone."); | ||||
|     } | ||||
|  | ||||
|     replace_file("/etc/timezone", timezone.as_bytes(), CreateOptions::new(), true)?; | ||||
|     replace_file( | ||||
|         "/etc/timezone", | ||||
|         timezone.as_bytes(), | ||||
|         CreateOptions::new(), | ||||
|         true, | ||||
|     )?; | ||||
|  | ||||
|     let _ = std::fs::remove_file("/etc/localtime"); | ||||
|  | ||||
|  | ||||