move ApiConfig, FileLogger and CommandoSocket to proxmox-rest-server workspace

ApiConfig: avoid using pbs_config::backup_user()
CommandoSocket: avoid using pbs_config::backup_user()
FileLogger: avoid using pbs_config::backup_user()
- use atomic_open_or_create_file()

Auth Trait: moved definitions to proxmox-rest-server/src/lib.rs
- removed CachedUserInfo parameter
- return user as String (not Authid)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Author: Dietmar Maurer
Date: 2021-09-21 07:58:40 +02:00
Committed-by: Thomas Lamprecht
Parent: 037f6b6d5e
Commit: fd6d243843
34 changed files with 222 additions and 176 deletions

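The shape of the reworked Auth trait is easiest to see in one place. Below is a minimal sketch reconstructed from the hunks that follow (the exact item layout inside proxmox-rest-server/src/lib.rs is assumed): the CachedUserInfo parameter is gone, and check_auth() returns the authenticated user as a plain String that callers parse back into an Authid where needed.

```rust
use anyhow::Error;

pub enum AuthError {
    Generic(Error),
    NoData,
}

impl From<Error> for AuthError {
    fn from(err: Error) -> Self {
        AuthError::Generic(err)
    }
}

pub trait ApiAuth {
    /// Authenticate a request from its headers; any user lookup (e.g. via
    /// CachedUserInfo) is now an implementation detail of the implementor.
    fn check_auth(
        &self,
        headers: &http::HeaderMap,
        method: &hyper::Method,
    ) -> Result<String, AuthError>;
}
```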

@@ -1,11 +1,12 @@
//! Provides authentication primitives for the HTTP server
use anyhow::{format_err, Error};
use anyhow::format_err;
use std::sync::Arc;
use pbs_tools::ticket::{self, Ticket};
use pbs_config::{token_shadow, CachedUserInfo};
use pbs_api_types::{Authid, Userid};
use proxmox_rest_server::{ApiAuth, AuthError};
use crate::auth_helpers::*;
use crate::tools;
@@ -13,26 +14,6 @@ use crate::tools;
use hyper::header;
use percent_encoding::percent_decode_str;
pub enum AuthError {
Generic(Error),
NoData,
}
impl From<Error> for AuthError {
fn from(err: Error) -> Self {
AuthError::Generic(err)
}
}
pub trait ApiAuth {
fn check_auth(
&self,
headers: &http::HeaderMap,
method: &hyper::Method,
user_info: &CachedUserInfo,
) -> Result<Authid, AuthError>;
}
struct UserAuthData {
ticket: String,
csrf_token: Option<String>,
@@ -80,8 +61,10 @@ impl ApiAuth for UserApiAuth {
&self,
headers: &http::HeaderMap,
method: &hyper::Method,
user_info: &CachedUserInfo,
) -> Result<Authid, AuthError> {
) -> Result<String, AuthError> {
let user_info = CachedUserInfo::new()?;
let auth_data = Self::extract_auth_data(headers);
match auth_data {
Some(AuthData::User(user_auth_data)) => {
@@ -111,7 +94,7 @@ impl ApiAuth for UserApiAuth {
}
}
Ok(auth_id)
Ok(auth_id.to_string())
}
Some(AuthData::ApiToken(api_token)) => {
let mut parts = api_token.splitn(2, ':');
@@ -133,7 +116,7 @@ impl ApiAuth for UserApiAuth {
token_shadow::verify_secret(&tokenid, &tokensecret)?;
Ok(tokenid)
Ok(tokenid.to_string())
}
None => Err(AuthError::NoData),
}

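With the CachedUserInfo parameter gone, a downstream server that does not need PBS user handling can implement the trait on its own terms. A hypothetical example (not part of this commit) that accepts every request as one fixed user, e.g. for a test setup:

```rust
struct StaticAuth {
    user: String,
}

impl ApiAuth for StaticAuth {
    fn check_auth(
        &self,
        _headers: &http::HeaderMap,
        _method: &hyper::Method,
    ) -> Result<String, AuthError> {
        // No ticket or token checks: always report the configured user.
        Ok(self.user.clone())
    }
}
```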

@@ -1,220 +0,0 @@
use anyhow::{bail, format_err, Error};
use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
use std::path::{PathBuf, Path};
use std::sync::Arc;
use futures::*;
use tokio::net::UnixListener;
use serde::Serialize;
use serde_json::Value;
use nix::sys::socket;
/// Listens on a Unix Socket to handle simple command asynchronously
fn create_control_socket<P, F>(path: P, func: F) -> Result<impl Future<Output = ()>, Error>
where
P: Into<PathBuf>,
F: Fn(Value) -> Result<Value, Error> + Send + Sync + 'static,
{
let path: PathBuf = path.into();
let backup_user = pbs_config::backup_user()?;
let backup_gid = backup_user.gid.as_raw();
let socket = UnixListener::bind(&path)?;
let func = Arc::new(func);
let control_future = async move {
loop {
let (conn, _addr) = match socket.accept().await {
Ok(data) => data,
Err(err) => {
eprintln!("failed to accept on control socket {:?}: {}", path, err);
continue;
}
};
let opt = socket::sockopt::PeerCredentials {};
let cred = match socket::getsockopt(conn.as_raw_fd(), opt) {
Ok(cred) => cred,
Err(err) => {
eprintln!("no permissions - unable to read peer credential - {}", err);
continue;
}
};
// check permissions (same gid, root user, or backup group)
let mygid = unsafe { libc::getgid() };
if !(cred.uid() == 0 || cred.gid() == mygid || cred.gid() == backup_gid) {
eprintln!("no permissions for {:?}", cred);
continue;
}
let (rx, mut tx) = tokio::io::split(conn);
let abort_future = super::last_worker_future().map(|_| ());
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
let func = Arc::clone(&func);
let path = path.clone();
tokio::spawn(futures::future::select(
async move {
let mut rx = tokio::io::BufReader::new(rx);
let mut line = String::new();
loop {
line.clear();
match rx.read_line({ line.clear(); &mut line }).await {
Ok(0) => break,
Ok(_) => (),
Err(err) => {
eprintln!("control socket {:?} read error: {}", path, err);
return;
}
}
let response = match line.parse::<Value>() {
Ok(param) => match func(param) {
Ok(res) => format!("OK: {}\n", res),
Err(err) => format!("ERROR: {}\n", err),
}
Err(err) => format!("ERROR: {}\n", err),
};
if let Err(err) = tx.write_all(response.as_bytes()).await {
eprintln!("control socket {:?} write response error: {}", path, err);
return;
}
}
}.boxed(),
abort_future,
).map(|_| ()));
}
}.boxed();
let abort_future = super::last_worker_future().map_err(|_| {});
let task = futures::future::select(
control_future,
abort_future,
).map(|_: futures::future::Either<(Result<(), Error>, _), _>| ());
Ok(task)
}
pub async fn send_command<P, T>(path: P, params: &T) -> Result<Value, Error>
where
P: AsRef<Path>,
T: ?Sized + Serialize,
{
let mut command_string = serde_json::to_string(params)?;
command_string.push('\n');
send_raw_command(path.as_ref(), &command_string).await
}
pub async fn send_raw_command<P>(path: P, command_string: &str) -> Result<Value, Error>
where
P: AsRef<Path>,
{
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
let mut conn = tokio::net::UnixStream::connect(path)
.map_err(move |err| format_err!("control socket connect failed - {}", err))
.await?;
conn.write_all(command_string.as_bytes()).await?;
if !command_string.as_bytes().ends_with(b"\n") {
conn.write_all(b"\n").await?;
}
AsyncWriteExt::shutdown(&mut conn).await?;
let mut rx = tokio::io::BufReader::new(conn);
let mut data = String::new();
if rx.read_line(&mut data).await? == 0 {
bail!("no response");
}
if let Some(res) = data.strip_prefix("OK: ") {
match res.parse::<Value>() {
Ok(v) => Ok(v),
Err(err) => bail!("unable to parse json response - {}", err),
}
} else if let Some(err) = data.strip_prefix("ERROR: ") {
bail!("{}", err);
} else {
bail!("unable to parse response: {}", data);
}
}
/// A callback for a specific commando socket.
pub type CommandoSocketFn = Box<(dyn Fn(Option<&Value>) -> Result<Value, Error> + Send + Sync + 'static)>;
/// Tooling to get a single control command socket where one can register multiple commands
/// dynamically.
/// You need to call `spawn()` to make the socket active.
pub struct CommandoSocket {
socket: PathBuf,
commands: HashMap<String, CommandoSocketFn>,
}
impl CommandoSocket {
pub fn new<P>(path: P) -> Self
where P: Into<PathBuf>,
{
CommandoSocket {
socket: path.into(),
commands: HashMap::new(),
}
}
/// Spawn the socket and consume self, meaning you cannot register commands anymore after
/// calling this.
pub fn spawn(self) -> Result<(), Error> {
let control_future = create_control_socket(self.socket.to_owned(), move |param| {
let param = param
.as_object()
.ok_or_else(|| format_err!("unable to parse parameters (expected json object)"))?;
let command = match param.get("command") {
Some(Value::String(command)) => command.as_str(),
None => bail!("no command"),
_ => bail!("unable to parse command"),
};
if !self.commands.contains_key(command) {
bail!("got unknown command '{}'", command);
}
match self.commands.get(command) {
None => bail!("got unknown command '{}'", command),
Some(handler) => {
let args = param.get("args"); //.unwrap_or(&Value::Null);
(handler)(args)
},
}
})?;
tokio::spawn(control_future);
Ok(())
}
/// Register a new command with a callback.
pub fn register_command<F>(
&mut self,
command: String,
handler: F,
) -> Result<(), Error>
where
F: Fn(Option<&Value>) -> Result<Value, Error> + Send + Sync + 'static,
{
if self.commands.contains_key(&command) {
bail!("command '{}' already exists!", command);
}
self.commands.insert(command, Box::new(handler));
Ok(())
}
}

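For orientation, a usage sketch of the command socket API that moves into proxmox-rest-server, following the implementation shown above. The socket path and command name are invented, and the moved constructor may additionally take ownership options now that it no longer looks up pbs_config::backup_user() internally:

```rust
use anyhow::Error;
use serde_json::{json, Value};

async fn commando_socket_example() -> Result<(), Error> {
    // Server side: register handlers, then consume the socket by spawning it.
    let mut sock = CommandoSocket::new("/run/example/cmd.sock");
    sock.register_command("ping".to_string(), |_args: Option<&Value>| {
        Ok(json!("pong"))
    })?;
    sock.spawn()?; // after this, no further commands can be registered

    // Client side (in practice a different process, e.g. a CLI talking to the
    // proxy): send_command() serializes the request, terminates it with '\n'
    // and parses the "OK: <json>" / "ERROR: <message>" reply line.
    let reply = send_command(
        "/run/example/cmd.sock",
        &json!({ "command": "ping", "args": null }),
    ).await?;
    assert_eq!(reply, json!("pong"));
    Ok(())
}
```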

@@ -1,171 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::SystemTime;
use std::fs::metadata;
use std::sync::{Arc, Mutex, RwLock};
use anyhow::{bail, Error, format_err};
use hyper::Method;
use handlebars::Handlebars;
use serde::Serialize;
use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};
use proxmox::tools::fs::{create_path, CreateOptions};
use crate::tools::{FileLogger, FileLogOptions};
use super::auth::ApiAuth;
pub struct ApiConfig {
basedir: PathBuf,
router: &'static Router,
aliases: HashMap<String, PathBuf>,
env_type: RpcEnvironmentType,
templates: RwLock<Handlebars<'static>>,
template_files: RwLock<HashMap<String, (SystemTime, PathBuf)>>,
request_log: Option<Arc<Mutex<FileLogger>>>,
pub api_auth: Arc<dyn ApiAuth + Send + Sync>,
}
impl ApiConfig {
pub fn new<B: Into<PathBuf>>(
basedir: B,
router: &'static Router,
env_type: RpcEnvironmentType,
api_auth: Arc<dyn ApiAuth + Send + Sync>,
) -> Result<Self, Error> {
Ok(Self {
basedir: basedir.into(),
router,
aliases: HashMap::new(),
env_type,
templates: RwLock::new(Handlebars::new()),
template_files: RwLock::new(HashMap::new()),
request_log: None,
api_auth,
})
}
pub fn find_method(
&self,
components: &[&str],
method: Method,
uri_param: &mut HashMap<String, String>,
) -> Option<&'static ApiMethod> {
self.router.find_method(components, method, uri_param)
}
pub fn find_alias(&self, components: &[&str]) -> PathBuf {
let mut prefix = String::new();
let mut filename = self.basedir.clone();
let comp_len = components.len();
if comp_len >= 1 {
prefix.push_str(components[0]);
if let Some(subdir) = self.aliases.get(&prefix) {
filename.push(subdir);
components.iter().skip(1).for_each(|comp| filename.push(comp));
} else {
components.iter().for_each(|comp| filename.push(comp));
}
}
filename
}
pub fn add_alias<S, P>(&mut self, alias: S, path: P)
where S: Into<String>,
P: Into<PathBuf>,
{
self.aliases.insert(alias.into(), path.into());
}
pub fn env_type(&self) -> RpcEnvironmentType {
self.env_type
}
pub fn register_template<P>(&self, name: &str, path: P) -> Result<(), Error>
where
P: Into<PathBuf>
{
if self.template_files.read().unwrap().contains_key(name) {
bail!("template already registered");
}
let path: PathBuf = path.into();
let metadata = metadata(&path)?;
let mtime = metadata.modified()?;
self.templates.write().unwrap().register_template_file(name, &path)?;
self.template_files.write().unwrap().insert(name.to_string(), (mtime, path));
Ok(())
}
/// Checks if the template was modified since the last rendering
/// if yes, it loads the new version of the template
pub fn render_template<T>(&self, name: &str, data: &T) -> Result<String, Error>
where
T: Serialize,
{
let path;
let mtime;
{
let template_files = self.template_files.read().unwrap();
let (old_mtime, old_path) = template_files.get(name).ok_or_else(|| format_err!("template not found"))?;
mtime = metadata(old_path)?.modified()?;
if mtime <= *old_mtime {
return self.templates.read().unwrap().render(name, data).map_err(|err| format_err!("{}", err));
}
path = old_path.to_path_buf();
}
{
let mut template_files = self.template_files.write().unwrap();
let mut templates = self.templates.write().unwrap();
templates.register_template_file(name, &path)?;
template_files.insert(name.to_string(), (mtime, path));
templates.render(name, data).map_err(|err| format_err!("{}", err))
}
}
pub fn enable_file_log<P>(
&mut self,
path: P,
commando_sock: &mut super::CommandoSocket,
) -> Result<(), Error>
where
P: Into<PathBuf>
{
let path: PathBuf = path.into();
if let Some(base) = path.parent() {
if !base.exists() {
let backup_user = pbs_config::backup_user()?;
let opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
create_path(base, None, Some(opts)).map_err(|err| format_err!("{}", err))?;
}
}
let logger_options = FileLogOptions {
append: true,
owned_by_backup: true,
..Default::default()
};
let request_log = Arc::new(Mutex::new(FileLogger::new(&path, logger_options)?));
self.request_log = Some(Arc::clone(&request_log));
commando_sock.register_command("api-access-log-reopen".into(), move |_args| {
println!("re-opening log file");
request_log.lock().unwrap().reopen()?;
Ok(serde_json::Value::Null)
})?;
Ok(())
}
pub fn get_file_log(&self) -> Option<&Arc<Mutex<FileLogger>>> {
self.request_log.as_ref()
}
}

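A rough sketch of how a daemon wires this up, following the (pre-move) API shown above. ROUTER is stood in for by a parameter, the paths are invented, and in the moved crate enable_file_log() may take explicit ownership options instead of relying on the backup user:

```rust
use std::sync::Arc;
use anyhow::Error;
use proxmox::api::RpcEnvironmentType;

fn make_api_config(
    router: &'static proxmox::api::Router,
    api_auth: Arc<dyn ApiAuth + Send + Sync>,
    commando_sock: &mut CommandoSocket,
) -> Result<ApiConfig, Error> {
    let mut config = ApiConfig::new(
        "/usr/share/example-www", // basedir for static files and templates
        router,
        RpcEnvironmentType::PUBLIC,
        api_auth,
    )?;

    // alias URLs below the basedir and register a Handlebars template
    config.add_alias("css", "/usr/share/example-www/css");
    config.register_template("index", "/usr/share/example-www/index.hbs")?;

    // access log; re-opened via the "api-access-log-reopen" socket command
    config.enable_file_log("/var/log/example/api/access.log", commando_sock)?;

    Ok(config)
}
```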

@@ -52,21 +52,12 @@ pub use environment::*;
mod upid;
pub use upid::*;
mod state;
pub use state::*;
mod command_socket;
pub use command_socket::*;
mod worker_task;
pub use worker_task::*;
mod h2service;
pub use h2service::*;
pub mod config;
pub use config::*;
pub mod formatter;
#[macro_use]
@@ -98,7 +89,7 @@ pub mod pull;
pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
let proxy_pid = crate::server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
let _: Value = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
.await?;
Ok(())
}
@@ -106,7 +97,7 @@ pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
pub(crate) async fn notify_datastore_removed() -> Result<(), Error> {
let proxy_pid = crate::server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
let _: Value = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
.await?;
Ok(())
}


@@ -29,21 +29,20 @@ use proxmox::api::{
RpcEnvironmentType,
};
use proxmox::http_err;
use proxmox::tools::fs::CreateOptions;
use pbs_tools::compression::{DeflateEncoder, Level};
use pbs_tools::stream::AsyncReaderStream;
use pbs_api_types::{Authid, Userid};
use proxmox_rest_server::{ApiConfig, FileLogger, FileLogOptions, AuthError};
use super::auth::AuthError;
use super::environment::RestEnvironment;
use super::formatter::*;
use super::ApiConfig;
use crate::auth_helpers::*;
use pbs_config::CachedUserInfo;
use crate::tools;
use crate::tools::compression::CompressionMethod;
use crate::tools::FileLogger;
extern "C" {
fn tzset();
@@ -196,10 +195,16 @@ fn log_response(
}
}
pub fn auth_logger() -> Result<FileLogger, Error> {
let logger_options = tools::FileLogOptions {
let backup_user = pbs_config::backup_user()?;
let file_opts = CreateOptions::new()
.owner(backup_user.uid)
.group(backup_user.gid);
let logger_options = FileLogOptions {
append: true,
prefix_time: true,
owned_by_backup: true,
file_opts,
..Default::default()
};
FileLogger::new(pbs_buildcfg::API_AUTH_LOG_FN, logger_options)
@@ -681,7 +686,6 @@ async fn handle_request(
rpcenv.set_client_ip(Some(*peer));
let user_info = CachedUserInfo::new()?;
let auth = &api.api_auth;
let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
@@ -708,8 +712,8 @@
}
if auth_required {
match auth.check_auth(&parts.headers, &method, &user_info) {
Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
match auth.check_auth(&parts.headers, &method) {
Ok(authid) => rpcenv.set_auth_id(Some(authid)),
Err(auth_err) => {
let err = match auth_err {
AuthError::Generic(err) => err,
@@ -738,6 +742,8 @@
}
Some(api_method) => {
let auth_id = rpcenv.get_auth_id();
let user_info = CachedUserInfo::new()?;
if !check_api_permission(
api_method.access.permission,
auth_id.as_deref(),
@@ -779,8 +785,9 @@
if comp_len == 0 {
let language = extract_lang_header(&parts.headers);
match auth.check_auth(&parts.headers, &method, &user_info) {
match auth.check_auth(&parts.headers, &method) {
Ok(auth_id) => {
let auth_id: Authid = auth_id.parse()?;
if !auth_id.is_token() {
let userid = auth_id.user();
let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);


@@ -1,142 +0,0 @@
use anyhow::{Error};
use lazy_static::lazy_static;
use std::sync::Mutex;
use futures::*;
use tokio::signal::unix::{signal, SignalKind};
use pbs_tools::broadcast_future::BroadcastData;
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum ServerMode {
Normal,
Shutdown,
}
pub struct ServerState {
pub mode: ServerMode,
pub shutdown_listeners: BroadcastData<()>,
pub last_worker_listeners: BroadcastData<()>,
pub worker_count: usize,
pub internal_task_count: usize,
pub reload_request: bool,
}
lazy_static! {
static ref SERVER_STATE: Mutex<ServerState> = Mutex::new(ServerState {
mode: ServerMode::Normal,
shutdown_listeners: BroadcastData::new(),
last_worker_listeners: BroadcastData::new(),
worker_count: 0,
internal_task_count: 0,
reload_request: false,
});
}
pub fn server_state_init() -> Result<(), Error> {
let mut stream = signal(SignalKind::interrupt())?;
let future = async move {
while stream.recv().await.is_some() {
println!("got shutdown request (SIGINT)");
SERVER_STATE.lock().unwrap().reload_request = false;
crate::tools::request_shutdown();
}
}.boxed();
let abort_future = last_worker_future().map_err(|_| {});
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
let mut stream = signal(SignalKind::hangup())?;
let future = async move {
while stream.recv().await.is_some() {
println!("got reload request (SIGHUP)");
SERVER_STATE.lock().unwrap().reload_request = true;
crate::tools::request_shutdown();
}
}.boxed();
let abort_future = last_worker_future().map_err(|_| {});
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
Ok(())
}
pub fn is_reload_request() -> bool {
let data = SERVER_STATE.lock().unwrap();
data.mode == ServerMode::Shutdown && data.reload_request
}
pub fn server_shutdown() {
let mut data = SERVER_STATE.lock().unwrap();
println!("SET SHUTDOWN MODE");
data.mode = ServerMode::Shutdown;
data.shutdown_listeners.notify_listeners(Ok(()));
drop(data); // unlock
check_last_worker();
}
pub fn shutdown_future() -> impl Future<Output = ()> {
let mut data = SERVER_STATE.lock().unwrap();
data
.shutdown_listeners
.listen()
.map(|_| ())
}
pub fn last_worker_future() -> impl Future<Output = Result<(), Error>> {
let mut data = SERVER_STATE.lock().unwrap();
data.last_worker_listeners.listen()
}
pub fn set_worker_count(count: usize) {
SERVER_STATE.lock().unwrap().worker_count = count;
check_last_worker();
}
pub fn check_last_worker() {
let mut data = SERVER_STATE.lock().unwrap();
if !(data.mode == ServerMode::Shutdown && data.worker_count == 0 && data.internal_task_count == 0) { return; }
data.last_worker_listeners.notify_listeners(Ok(()));
}
/// Spawns a tokio task that will be tracked for reload
/// and if it is finished, notify the last_worker_listener if we
/// are in shutdown mode
pub fn spawn_internal_task<T>(task: T)
where
T: Future + Send + 'static,
T::Output: Send + 'static,
{
let mut data = SERVER_STATE.lock().unwrap();
data.internal_task_count += 1;
tokio::spawn(async move {
let _ = tokio::spawn(task).await; // ignore errors
{ // drop mutex
let mut data = SERVER_STATE.lock().unwrap();
if data.internal_task_count > 0 {
data.internal_task_count -= 1;
}
}
check_last_worker();
});
}

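The state helpers above are driven by the daemons roughly as sketched below, assuming the functions keep their names and become public in proxmox_rest_server (only set_worker_count and send_command are visible in the hunks that follow):

```rust
async fn daemon_main() -> Result<(), anyhow::Error> {
    // install the SIGINT (shutdown) and SIGHUP (reload) handlers
    proxmox_rest_server::server_state_init()?;

    // ... bind sockets, spawn the REST server, start workers ...

    // resolves once a shutdown was requested
    proxmox_rest_server::shutdown_future().await;

    // resolves once the last tracked worker/internal task has finished
    proxmox_rest_server::last_worker_future().await?;

    if proxmox_rest_server::is_reload_request() {
        // re-exec for a seamless reload instead of exiting
    }
    Ok(())
}
```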

@@ -20,12 +20,10 @@ use pbs_buildcfg;
use pbs_tools::logrotate::{LogRotate, LogRotateFiles};
use pbs_api_types::{Authid, TaskStateType, UPID};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use proxmox_rest_server::{CommandoSocket, FileLogger, FileLogOptions};
use super::UPIDExt;
use crate::server;
use crate::tools::{FileLogger, FileLogOptions};
macro_rules! taskdir {
($subdir:expr) => (concat!(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!(), "/tasks", $subdir))
}
@@ -41,7 +39,7 @@ lazy_static! {
/// checks if the task UPID refers to a worker from this process
fn is_local_worker(upid: &UPID) -> bool {
upid.pid == server::pid() && upid.pstart == server::pstart()
upid.pid == crate::server::pid() && upid.pstart == crate::server::pstart()
}
/// Test if the task is still running
@@ -54,14 +52,14 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
return Ok(false);
}
let sock = server::ctrl_sock_from_pid(upid.pid);
let sock = crate::server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-status",
"args": {
"upid": upid.to_string(),
},
});
let status = super::send_command(sock, &cmd).await?;
let status = proxmox_rest_server::send_command(sock, &cmd).await?;
if let Some(active) = status.as_bool() {
Ok(active)
@@ -84,7 +82,7 @@ pub fn worker_is_active_local(upid: &UPID) -> bool {
}
pub fn register_task_control_commands(
commando_sock: &mut super::CommandoSocket,
commando_sock: &mut CommandoSocket,
) -> Result<(), Error> {
fn get_upid(args: Option<&Value>) -> Result<UPID, Error> {
let args = if let Some(args) = args { args } else { bail!("missing args") };
@@ -128,14 +126,14 @@ pub fn abort_worker_async(upid: UPID) {
pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
let sock = server::ctrl_sock_from_pid(upid.pid);
let sock = crate::server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-abort",
"args": {
"upid": upid.to_string(),
},
});
super::send_command(sock, &cmd).map_ok(|_| ()).await
proxmox_rest_server::send_command(sock, &cmd).map_ok(|_| ()).await
}
fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
@@ -579,7 +577,6 @@ impl Iterator for TaskListInfoIterator {
/// task/future. Each task can `log()` messages, which are stored
/// persistently to files. Task should poll the `abort_requested`
/// flag, and stop execution when requested.
#[derive(Debug)]
pub struct WorkerTask {
upid: UPID,
data: Mutex<WorkerTaskData>,
@@ -593,7 +590,6 @@ impl std::fmt::Display for WorkerTask {
}
}
#[derive(Debug)]
struct WorkerTaskData {
logger: FileLogger,
progress: f64, // 0..1
@@ -642,7 +638,7 @@ impl WorkerTask {
{
let mut hash = WORKER_TASK_LIST.lock().unwrap();
hash.insert(task_id, worker.clone());
super::set_worker_count(hash.len());
proxmox_rest_server::set_worker_count(hash.len());
}
update_active_workers(Some(&upid))?;
@@ -729,7 +725,7 @@
WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
let _ = update_active_workers(None);
super::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
proxmox_rest_server::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
}
/// Log a message.