2020-09-28 13:32:08 +00:00
|
|
|
use std::collections::{HashMap, VecDeque};
|
2019-04-05 10:37:35 +00:00
|
|
|
use std::fs::File;
|
2020-09-28 13:32:07 +00:00
|
|
|
use std::io::{Read, Write, BufRead, BufReader};
|
2019-04-11 05:55:02 +00:00
|
|
|
use std::panic::UnwindSafe;
|
2020-01-16 07:51:52 +00:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
|
|
use std::sync::{Arc, Mutex};
|
2019-04-11 05:55:02 +00:00
|
|
|
|
2020-04-17 12:11:25 +00:00
|
|
|
use anyhow::{bail, format_err, Error};
|
2020-01-16 07:51:52 +00:00
|
|
|
use futures::*;
|
|
|
|
use lazy_static::lazy_static;
|
2019-04-10 10:42:24 +00:00
|
|
|
use serde_json::{json, Value};
|
2020-08-13 08:29:13 +00:00
|
|
|
use serde::{Serialize, Deserialize};
|
2020-01-16 07:51:52 +00:00
|
|
|
use tokio::sync::oneshot;
|
2019-04-04 06:58:39 +00:00
|
|
|
|
2020-01-16 07:58:54 +00:00
|
|
|
use proxmox::sys::linux::procfs;
|
2020-01-21 11:28:01 +00:00
|
|
|
use proxmox::try_block;
|
2021-07-20 11:51:54 +00:00
|
|
|
use proxmox::tools::fs::{create_path, replace_file, CreateOptions};
|
2019-08-03 11:05:38 +00:00
|
|
|
|
2021-07-07 11:47:17 +00:00
|
|
|
use super::{UPID, UPIDExt};
|
2019-04-08 16:43:26 +00:00
|
|
|
|
2021-07-06 09:56:35 +00:00
|
|
|
use pbs_buildcfg;
|
|
|
|
|
2020-11-02 18:13:36 +00:00
|
|
|
use crate::server;
|
2020-09-28 13:32:08 +00:00
|
|
|
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
|
2020-10-15 15:49:18 +00:00
|
|
|
use crate::tools::{FileLogger, FileLogOptions};
|
2020-10-30 14:02:10 +00:00
|
|
|
use crate::api2::types::{Authid, TaskStateType};
|
2021-07-20 11:51:54 +00:00
|
|
|
use crate::backup::{open_backup_lockfile, BackupLockGuard};
|
2019-04-04 06:58:39 +00:00
|
|
|
|
2020-11-02 07:50:56 +00:00
|
|
|
/// Expand to a compile-time path string below the task log directory:
/// `<PROXMOX_BACKUP_LOG_DIR>/tasks<subdir>`.
macro_rules! taskdir {
    ($subdir:expr) => (concat!(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!(), "/tasks", $subdir))
}
|
|
|
|
/// Base directory holding per-task log files.
pub const PROXMOX_BACKUP_TASK_DIR: &str = taskdir!("/");
/// Lock file guarding concurrent access to the task list files.
pub const PROXMOX_BACKUP_TASK_LOCK_FN: &str = taskdir!("/.active.lock");
/// File listing currently active (running) tasks.
pub const PROXMOX_BACKUP_ACTIVE_TASK_FN: &str = taskdir!("/active");
/// Legacy task index file — only read for compatibility and deleted once
/// migrated (see the "TODO remove with 1.x" notes below).
pub const PROXMOX_BACKUP_INDEX_TASK_FN: &str = taskdir!("/index");
/// Append-only archive of finished tasks; rotated via `LogRotate`.
pub const PROXMOX_BACKUP_ARCHIVE_TASK_FN: &str = taskdir!("/archive");
|
2020-09-28 13:32:06 +00:00
|
|
|
|
2019-04-04 06:58:39 +00:00
|
|
|
lazy_static! {
    /// All worker tasks currently running in *this* process, keyed by
    /// their `UPID::task_id`. Entries are inserted in `WorkerTask::new`
    /// and removed in `WorkerTask::log_result`.
    static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());
}
|
2019-04-09 10:15:06 +00:00
|
|
|
|
2020-11-02 18:13:36 +00:00
|
|
|
/// checks if the task UPID refers to a worker from this process
|
|
|
|
fn is_local_worker(upid: &UPID) -> bool {
|
|
|
|
upid.pid == server::pid() && upid.pstart == server::pstart()
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 16:43:26 +00:00
|
|
|
/// Test if the task is still running
|
2020-05-07 06:30:38 +00:00
|
|
|
pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
|
2020-11-02 18:13:36 +00:00
|
|
|
if is_local_worker(upid) {
|
2020-05-07 06:30:38 +00:00
|
|
|
return Ok(WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id));
|
|
|
|
}
|
|
|
|
|
2021-01-19 09:27:59 +00:00
|
|
|
if procfs::check_process_running_pstart(upid.pid, upid.pstart).is_none() {
|
2020-05-07 06:30:38 +00:00
|
|
|
return Ok(false);
|
|
|
|
}
|
|
|
|
|
2020-11-02 18:13:36 +00:00
|
|
|
let sock = server::ctrl_sock_from_pid(upid.pid);
|
2020-05-07 06:30:38 +00:00
|
|
|
let cmd = json!({
|
2020-11-02 18:13:36 +00:00
|
|
|
"command": "worker-task-status",
|
2020-11-04 15:44:07 +00:00
|
|
|
"args": {
|
|
|
|
"upid": upid.to_string(),
|
|
|
|
},
|
2020-05-07 06:30:38 +00:00
|
|
|
});
|
2021-05-11 13:53:59 +00:00
|
|
|
let status = super::send_command(sock, &cmd).await?;
|
2019-04-07 11:51:46 +00:00
|
|
|
|
2020-05-07 06:30:38 +00:00
|
|
|
if let Some(active) = status.as_bool() {
|
|
|
|
Ok(active)
|
|
|
|
} else {
|
|
|
|
bail!("got unexpected result {:?} (expected bool)", status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Test if the task is still running (fast but inaccurate implementation)
|
|
|
|
///
|
2020-11-02 18:13:36 +00:00
|
|
|
/// If the task is spawned from a different process, we simply return if
|
2020-05-07 06:30:38 +00:00
|
|
|
/// that process is still running. This information is good enough to detect
|
|
|
|
/// stale tasks...
|
2020-05-29 06:05:10 +00:00
|
|
|
pub fn worker_is_active_local(upid: &UPID) -> bool {
|
2020-11-02 18:13:36 +00:00
|
|
|
if is_local_worker(upid) {
|
2019-10-26 09:36:01 +00:00
|
|
|
WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id)
|
2019-04-08 16:43:26 +00:00
|
|
|
} else {
|
2019-10-26 09:36:01 +00:00
|
|
|
procfs::check_process_running_pstart(upid.pid, upid.pstart).is_some()
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-02 18:13:36 +00:00
|
|
|
/// Register the `worker-task-abort` and `worker-task-status` commands on
/// the given control socket, so other processes can query/abort workers
/// belonging to this process.
pub fn register_task_control_commands(
    commando_sock: &mut super::CommandoSocket,
) -> Result<(), Error> {
    // Extract and validate the UPID from a command's JSON args; rejects
    // UPIDs that do not belong to this process.
    fn get_upid(args: Option<&Value>) -> Result<UPID, Error> {
        let args = if let Some(args) = args { args } else { bail!("missing args") };
        let upid = match args.get("upid") {
            Some(Value::String(upid)) => upid.parse::<UPID>()?,
            None => bail!("no upid in args"),
            _ => bail!("unable to parse upid"),
        };
        if !is_local_worker(&upid) {
            bail!("upid does not belong to this process");
        }
        Ok(upid)
    }

    commando_sock.register_command("worker-task-abort".into(), move |args| {
        let upid = get_upid(args)?;

        // silently ignore unknown task ids — the worker may already be gone
        if let Some(ref worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
            worker.request_abort();
        }
        Ok(Value::Null)
    })?;
    commando_sock.register_command("worker-task-status".into(), move |args| {
        let upid = get_upid(args)?;

        // running == still present in the in-memory worker list
        let active = WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id);

        Ok(active.into())
    })?;

    Ok(())
}
|
|
|
|
|
2019-04-10 10:42:24 +00:00
|
|
|
pub fn abort_worker_async(upid: UPID) {
|
2019-08-27 07:18:39 +00:00
|
|
|
tokio::spawn(async move {
|
|
|
|
if let Err(err) = abort_worker(upid).await {
|
2019-04-10 10:42:24 +00:00
|
|
|
eprintln!("abort worker failed - {}", err);
|
|
|
|
}
|
2019-08-27 07:18:39 +00:00
|
|
|
});
|
2019-04-10 10:42:24 +00:00
|
|
|
}
|
|
|
|
|
2020-05-07 06:30:38 +00:00
|
|
|
pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
|
2019-04-10 10:42:24 +00:00
|
|
|
|
2020-11-02 18:13:36 +00:00
|
|
|
let sock = server::ctrl_sock_from_pid(upid.pid);
|
2019-04-10 10:42:24 +00:00
|
|
|
let cmd = json!({
|
2020-11-02 18:13:36 +00:00
|
|
|
"command": "worker-task-abort",
|
2020-11-04 15:44:07 +00:00
|
|
|
"args": {
|
|
|
|
"upid": upid.to_string(),
|
|
|
|
},
|
2019-04-10 10:42:24 +00:00
|
|
|
});
|
2021-05-11 13:53:59 +00:00
|
|
|
super::send_command(sock, &cmd).map_ok(|_| ()).await
|
2019-04-10 10:42:24 +00:00
|
|
|
}
|
|
|
|
|
2020-08-13 12:30:17 +00:00
|
|
|
fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
|
2019-04-05 10:37:35 +00:00
|
|
|
|
|
|
|
let data = line.splitn(3, ' ').collect::<Vec<&str>>();
|
|
|
|
|
|
|
|
let len = data.len();
|
|
|
|
|
|
|
|
match len {
|
|
|
|
1 => Ok((data[0].to_owned(), data[0].parse::<UPID>()?, None)),
|
|
|
|
3 => {
|
|
|
|
let endtime = i64::from_str_radix(data[1], 16)?;
|
2020-08-13 12:30:17 +00:00
|
|
|
let state = TaskState::from_endtime_and_message(endtime, data[2])?;
|
|
|
|
Ok((data[0].to_owned(), data[0].parse::<UPID>()?, Some(state)))
|
2019-04-05 10:37:35 +00:00
|
|
|
}
|
|
|
|
_ => bail!("wrong number of components"),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-06 15:53:12 +00:00
|
|
|
/// Create task log directory with correct permissions
|
2019-04-09 10:15:06 +00:00
|
|
|
pub fn create_task_log_dirs() -> Result<(), Error> {
|
2019-04-06 15:53:12 +00:00
|
|
|
|
|
|
|
try_block!({
|
2019-12-19 09:20:13 +00:00
|
|
|
let backup_user = crate::backup::backup_user()?;
|
2019-08-21 10:21:56 +00:00
|
|
|
let opts = CreateOptions::new()
|
2019-12-19 09:20:13 +00:00
|
|
|
.owner(backup_user.uid)
|
|
|
|
.group(backup_user.gid);
|
2019-04-06 15:53:12 +00:00
|
|
|
|
2021-07-06 09:56:35 +00:00
|
|
|
create_path(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR, None, Some(opts.clone()))?;
|
2019-08-21 10:21:56 +00:00
|
|
|
create_path(PROXMOX_BACKUP_TASK_DIR, None, Some(opts.clone()))?;
|
2021-07-06 09:56:35 +00:00
|
|
|
create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR, None, Some(opts))?;
|
2019-04-06 15:53:12 +00:00
|
|
|
Ok(())
|
|
|
|
}).map_err(|err: Error| format_err!("unable to create task log dir - {}", err))?;
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2020-08-13 08:29:14 +00:00
|
|
|
/// Read endtime (time of last log line) and exitstatus from task log file
/// If there is not a single line with at valid datetime, we assume the
/// starttime to be the endtime
pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {

    // fallback if no parsable log line is found
    let mut status = TaskState::Unknown { endtime: upid.starttime };

    let path = upid.log_path();

    let mut file = File::open(path)?;

    // speedup - only read tail
    use std::io::Seek;
    use std::io::SeekFrom;
    let _ = file.seek(SeekFrom::End(-8192)); // ignore errors

    let mut data = Vec::with_capacity(8192);
    file.read_to_end(&mut data)?;

    // strip newlines at the end of the task logs
    while data.last() == Some(&b'\n') {
        data.pop();
    }

    // isolate the last line (the status line written on task completion)
    let last_line = match data.iter().rposition(|c| *c == b'\n') {
        Some(start) if data.len() > (start+1) => &data[start+1..],
        Some(_) => &data, // should not happen, since we removed all trailing newlines
        None => &data,
    };

    let last_line = std::str::from_utf8(last_line)
        .map_err(|err| format_err!("upid_read_status: utf8 parse failed: {}", err))?;

    // expected format: "<rfc3339 timestamp>: TASK <status message>"
    let mut iter = last_line.splitn(2, ": ");
    if let Some(time_str) = iter.next() {
        if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
            // set the endtime even if we cannot parse the state
            status = TaskState::Unknown { endtime };
            if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
                if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
                    status = state;
                }
            }
        }
    }

    Ok(status)
}
|
|
|
|
|
2020-08-13 08:29:13 +00:00
|
|
|
/// Task State
///
/// `endtime` is the unix epoch timestamp (seconds) of the last log line /
/// task completion.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum TaskState {
    /// The Task ended with an undefined state
    Unknown { endtime: i64 },
    /// The Task ended and there were no errors or warnings
    OK { endtime: i64 },
    /// The Task had 'count' amount of warnings and no errors
    Warning { count: u64, endtime: i64 },
    /// The Task ended with the error described in 'message'
    Error { message: String, endtime: i64 },
}
|
|
|
|
|
|
|
|
impl TaskState {
|
2020-08-13 12:30:17 +00:00
|
|
|
pub fn endtime(&self) -> i64 {
|
|
|
|
match *self {
|
|
|
|
TaskState::Unknown { endtime } => endtime,
|
|
|
|
TaskState::OK { endtime } => endtime,
|
|
|
|
TaskState::Warning { endtime, .. } => endtime,
|
|
|
|
TaskState::Error { endtime, .. } => endtime,
|
2020-08-13 08:29:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-30 14:02:10 +00:00
|
|
|
pub fn tasktype(&self) -> TaskStateType {
|
|
|
|
match self {
|
|
|
|
TaskState::OK { .. } => TaskStateType::OK,
|
|
|
|
TaskState::Unknown { .. } => TaskStateType::Unknown,
|
|
|
|
TaskState::Error { .. } => TaskStateType::Error,
|
|
|
|
TaskState::Warning { .. } => TaskStateType::Warning,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-13 12:30:17 +00:00
|
|
|
fn result_text(&self) -> String {
|
2020-08-13 08:29:13 +00:00
|
|
|
match self {
|
2020-08-13 12:30:17 +00:00
|
|
|
TaskState::Error { message, .. } => format!("TASK ERROR: {}", message),
|
|
|
|
other => format!("TASK {}", other),
|
2020-08-13 08:29:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-13 12:30:17 +00:00
|
|
|
fn from_endtime_and_message(endtime: i64, s: &str) -> Result<Self, Error> {
|
2020-08-13 08:29:13 +00:00
|
|
|
if s == "unknown" {
|
2020-08-13 12:30:17 +00:00
|
|
|
Ok(TaskState::Unknown { endtime })
|
2020-08-13 08:29:13 +00:00
|
|
|
} else if s == "OK" {
|
2020-08-13 12:30:17 +00:00
|
|
|
Ok(TaskState::OK { endtime })
|
2021-01-18 12:50:28 +00:00
|
|
|
} else if let Some(warnings) = s.strip_prefix("WARNINGS: ") {
|
|
|
|
let count: u64 = warnings.parse()?;
|
2020-08-13 12:30:17 +00:00
|
|
|
Ok(TaskState::Warning{ count, endtime })
|
2021-01-19 09:27:59 +00:00
|
|
|
} else if !s.is_empty() {
|
2021-01-18 12:50:28 +00:00
|
|
|
let message = if let Some(err) = s.strip_prefix("ERROR: ") { err } else { s }.to_string();
|
2020-08-13 12:30:17 +00:00
|
|
|
Ok(TaskState::Error{ message, endtime })
|
2020-08-13 08:29:13 +00:00
|
|
|
} else {
|
|
|
|
bail!("unable to parse Task Status '{}'", s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-13 12:30:17 +00:00
|
|
|
impl std::cmp::PartialOrd for TaskState {
|
|
|
|
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
|
|
|
Some(self.endtime().cmp(&other.endtime()))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl std::cmp::Ord for TaskState {
|
|
|
|
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
|
|
|
self.endtime().cmp(&other.endtime())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl std::fmt::Display for TaskState {
|
|
|
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
|
|
match self {
|
|
|
|
TaskState::Unknown { .. } => write!(f, "unknown"),
|
|
|
|
TaskState::OK { .. }=> write!(f, "OK"),
|
|
|
|
TaskState::Warning { count, .. } => write!(f, "WARNINGS: {}", count),
|
|
|
|
TaskState::Error { message, .. } => write!(f, "{}", message),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-06 08:17:11 +00:00
|
|
|
/// Task details including parsed UPID
///
/// If there is no `state`, the task is still running.
#[derive(Debug)]
pub struct TaskListInfo {
    /// The parsed UPID
    pub upid: UPID,
    /// UPID string representation
    pub upid_str: String,
    /// Task `(endtime, status)` if already finished
    pub state: Option<TaskState>, // endtime, status
}
|
|
|
|
|
2021-08-26 11:17:55 +00:00
|
|
|
// Implement `From` instead of `Into` (clippy: `from_over_into`); the
// standard blanket impl still provides `Into`, so existing
// `info.into()` call sites keep working unchanged.
impl From<TaskListInfo> for pbs_api_types::TaskListItem {
    /// Convert into the API type used by task list endpoints.
    fn from(info: TaskListInfo) -> pbs_api_types::TaskListItem {
        // a running task (state == None) has neither endtime nor status
        let (endtime, status) = info
            .state
            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));

        pbs_api_types::TaskListItem {
            upid: info.upid_str,
            node: "localhost".to_string(),
            pid: info.upid.pid as i64,
            pstart: info.upid.pstart,
            starttime: info.upid.starttime,
            worker_type: info.upid.worker_type,
            worker_id: info.upid.worker_id,
            user: info.upid.auth_id,
            endtime,
            status,
        }
    }
}
|
|
|
|
|
2021-07-20 11:51:54 +00:00
|
|
|
/// Acquire the task-list lock file, shared (`exclusive == false`) for
/// readers or exclusive for writers.
fn lock_task_list_files(exclusive: bool) -> Result<BackupLockGuard, Error> {
    open_backup_lockfile(PROXMOX_BACKUP_TASK_LOCK_FN, None, exclusive)
}
|
|
|
|
|
2020-09-28 13:32:12 +00:00
|
|
|
/// checks if the Task Archive is bigger that 'size_threshold' bytes, and
|
|
|
|
/// rotates it if it is
|
|
|
|
pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
|
|
|
|
let _lock = lock_task_list_files(true)?;
|
2020-10-02 13:20:10 +00:00
|
|
|
|
2020-10-20 08:32:15 +00:00
|
|
|
let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress)
|
2021-01-19 13:04:46 +00:00
|
|
|
.ok_or_else(|| format_err!("could not get archive file names"))?;
|
2020-10-20 08:32:15 +00:00
|
|
|
|
|
|
|
logrotate.rotate(size_threshold, None, max_files)
|
2020-09-28 13:32:12 +00:00
|
|
|
}
|
|
|
|
|
2019-04-06 08:17:11 +00:00
|
|
|
// atomically read/update the task list, update status of finished tasks
// new_upid is added to the list when specified.
fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {

    let backup_user = crate::backup::backup_user()?;

    // exclusive lock over active/index/archive for the whole
    // read-modify-write cycle
    let lock = lock_task_list_files(true)?;

    // TODO remove with 1.x
    let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN)?;
    let had_index_file = !finish_list.is_empty();

    // We use filter_map because one negative case wants to *move* the data into `finish_list`,
    // clippy doesn't quite catch this!
    #[allow(clippy::unnecessary_filter_map)]
    let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?
        .into_iter()
        .filter_map(|info| {
            if info.state.is_some() {
                // this can happen when the active file still includes finished tasks
                finish_list.push(info);
                return None;
            }

            if !worker_is_active_local(&info.upid) {
                // println!("Detected stopped task '{}'", &info.upid_str);
                // task stopped without writing a status -> read its log
                // file, falling back to Unknown at the current time
                let now = proxmox::tools::time::epoch_i64();
                let status = upid_read_status(&info.upid).unwrap_or(TaskState::Unknown { endtime: now });
                finish_list.push(TaskListInfo {
                    upid: info.upid,
                    upid_str: info.upid_str,
                    state: Some(status)
                });
                return None;
            }

            Some(info)
        }).collect();

    if let Some(upid) = new_upid {
        active_list.push(TaskListInfo { upid: upid.clone(), upid_str: upid.to_string(), state: None });
    }

    let active_raw = render_task_list(&active_list);

    // atomically replace the active file with the cleaned-up list
    replace_file(
        PROXMOX_BACKUP_ACTIVE_TASK_FN,
        active_raw.as_bytes(),
        CreateOptions::new()
            .owner(backup_user.uid)
            .group(backup_user.gid),
    )?;

    // archive order: by endtime, with still-running entries (no state) last
    finish_list.sort_unstable_by(|a, b| {
        match (&a.state, &b.state) {
            (Some(s1), Some(s2)) => s1.cmp(&s2),
            (Some(_), None) => std::cmp::Ordering::Less,
            (None, Some(_)) => std::cmp::Ordering::Greater,
            _ => a.upid.starttime.cmp(&b.upid.starttime),
        }
    });

    if !finish_list.is_empty() {
        // append finished tasks to the (append-only) archive
        match std::fs::OpenOptions::new().append(true).create(true).open(PROXMOX_BACKUP_ARCHIVE_TASK_FN) {
            Ok(mut writer) => {
                for info in &finish_list {
                    writer.write_all(render_task_line(&info).as_bytes())?;
                }
            },
            Err(err) => bail!("could not write task archive - {}", err),
        }

        nix::unistd::chown(PROXMOX_BACKUP_ARCHIVE_TASK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
    }

    // TODO Remove with 1.x
    // for compatibility, if we had an INDEX file, we do not need it anymore
    if had_index_file {
        let _ = nix::unistd::unlink(PROXMOX_BACKUP_INDEX_TASK_FN);
    }

    drop(lock);

    Ok(())
}
|
2019-04-05 10:37:35 +00:00
|
|
|
|
2020-09-25 14:13:19 +00:00
|
|
|
fn render_task_line(info: &TaskListInfo) -> String {
|
|
|
|
let mut raw = String::new();
|
|
|
|
if let Some(status) = &info.state {
|
|
|
|
raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, status.endtime(), status));
|
|
|
|
} else {
|
|
|
|
raw.push_str(&info.upid_str);
|
|
|
|
raw.push('\n');
|
|
|
|
}
|
|
|
|
|
|
|
|
raw
|
|
|
|
}
|
|
|
|
|
|
|
|
fn render_task_list(list: &[TaskListInfo]) -> String {
|
|
|
|
let mut raw = String::new();
|
|
|
|
for info in list {
|
|
|
|
raw.push_str(&render_task_line(&info));
|
|
|
|
}
|
|
|
|
raw
|
|
|
|
}
|
|
|
|
|
2020-09-28 13:32:06 +00:00
|
|
|
// note this is not locked, caller has to make sure it is
|
|
|
|
// this will skip (and log) lines that are not valid status lines
|
|
|
|
fn read_task_file<R: Read>(reader: R) -> Result<Vec<TaskListInfo>, Error>
|
|
|
|
{
|
|
|
|
let reader = BufReader::new(reader);
|
|
|
|
let mut list = Vec::new();
|
|
|
|
for line in reader.lines() {
|
|
|
|
let line = line?;
|
|
|
|
match parse_worker_status_line(&line) {
|
|
|
|
Ok((upid_str, upid, state)) => list.push(TaskListInfo {
|
|
|
|
upid_str,
|
|
|
|
upid,
|
|
|
|
state
|
|
|
|
}),
|
|
|
|
Err(err) => {
|
|
|
|
eprintln!("unable to parse worker status '{}' - {}", line, err);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(list)
|
|
|
|
}
|
|
|
|
|
|
|
|
// note this is not locked, caller has to make sure it is
|
|
|
|
fn read_task_file_from_path<P>(path: P) -> Result<Vec<TaskListInfo>, Error>
|
|
|
|
where
|
|
|
|
P: AsRef<std::path::Path> + std::fmt::Debug,
|
|
|
|
{
|
|
|
|
let file = match File::open(&path) {
|
|
|
|
Ok(f) => f,
|
|
|
|
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
|
|
|
|
Err(err) => bail!("unable to open task list {:?} - {}", path, err),
|
|
|
|
};
|
|
|
|
|
|
|
|
read_task_file(file)
|
|
|
|
}
|
|
|
|
|
2020-09-28 13:32:08 +00:00
|
|
|
/// Iterator over task list entries: first the active task list, then
/// (unless constructed with `active_only`) the contents of each archive
/// file in turn.
pub struct TaskListInfoIterator {
    // entries not yet yielded; drained from the back (see `next`)
    list: VecDeque<TaskListInfo>,
    // true once there is nothing left to yield
    end: bool,
    // remaining archive files still to be read, if archives are included
    archive: Option<LogRotateFiles>,
    // shared task-list lock held while archives are read; released
    // when iteration finishes
    lock: Option<BackupLockGuard>,
}
|
|
|
|
|
|
|
|
impl TaskListInfoIterator {
    /// Create a new iterator; with `active_only` no archive files are
    /// read and no lock is kept while iterating.
    pub fn new(active_only: bool) -> Result<Self, Error> {
        let (read_lock, active_list) = {
            let lock = lock_task_list_files(false)?;
            let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;

            // active file is stale if it contains finished entries or
            // entries whose worker is no longer running
            let needs_update = active_list
                .iter()
                .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));

            // TODO remove with 1.x
            let index_exists = std::path::Path::new(PROXMOX_BACKUP_INDEX_TASK_FN).is_file();

            if needs_update || index_exists {
                // must drop the shared lock before update_active_workers
                // takes the exclusive one, then re-acquire and re-read
                drop(lock);
                update_active_workers(None)?;
                let lock = lock_task_list_files(false)?;
                let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;
                (lock, active_list)
            } else {
                (lock, active_list)
            }
        };

        let archive = if active_only {
            None
        } else {
            let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true)
                .ok_or_else(|| format_err!("could not get archive file names"))?;
            Some(logrotate.files())
        };

        // only keep the lock when we will be reading archive files later
        let lock = if active_only { None } else { Some(read_lock) };

        Ok(Self {
            list: active_list.into(),
            end: active_only,
            archive,
            lock,
        })
    }
}
|
|
|
|
|
|
|
|
impl Iterator for TaskListInfoIterator {
    type Item = Result<TaskListInfo, Error>;

    /// Drain the buffered list from the back; when it runs dry, refill
    /// it from the next archive file, or finish (releasing the lock).
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if let Some(element) = self.list.pop_back() {
                return Some(Ok(element));
            } else if self.end {
                return None;
            } else {
                // try to refill the buffer from the next archive file
                if let Some(mut archive) = self.archive.take() {
                    if let Some(file) = archive.next() {
                        let list = match read_task_file(file) {
                            Ok(list) => list,
                            Err(err) => return Some(Err(err)),
                        };
                        self.list.append(&mut list.into());
                        // put the (partially consumed) archive back
                        self.archive = Some(archive);
                        continue;
                    }
                }

                // no more archives: mark finished and drop the read lock
                self.end = true;
                self.lock.take();
            }
        }
    }
}
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Launch long running worker tasks.
///
/// A worker task can either be a whole thread, or a simply tokio
/// task/future. Each task can `log()` messages, which are stored
/// persistently to files. Task should poll the `abort_requested`
/// flag, and stop execution when requested.
#[derive(Debug)]
pub struct WorkerTask {
    // identifies this task; also determines its log file path
    upid: UPID,
    // mutable per-task state (logger, progress, warnings, listeners)
    data: Mutex<WorkerTaskData>,
    // cooperative abort flag, polled by the running worker
    abort_requested: AtomicBool,
}
|
|
|
|
|
|
|
|
impl std::fmt::Display for WorkerTask {
|
|
|
|
|
|
|
|
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
|
|
self.upid.fmt(f)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug)]
struct WorkerTaskData {
    // persistent per-task log file writer
    logger: FileLogger,
    progress: f64, // 0..1
    // number of warn() calls; a non-zero count yields TaskState::Warning
    warn_count: u64,
    // one-shot channels — presumably notified when an abort is
    // requested; confirm against request_abort()
    pub abort_listeners: Vec<oneshot::Sender<()>>,
}
|
|
|
|
|
|
|
|
impl WorkerTask {
|
|
|
|
|
2020-10-23 11:33:21 +00:00
|
|
|
    /// Create a new worker task: allocates a UPID, creates the task log
    /// file (owned by the backup user), registers the task in the
    /// process-wide worker list and records it in the active task file.
    pub fn new(worker_type: &str, worker_id: Option<String>, auth_id: Authid, to_stdout: bool) -> Result<Arc<Self>, Error> {
        let upid = UPID::new(worker_type, worker_id, auth_id)?;
        let task_id = upid.task_id;

        let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);

        // spread log files over 256 subdirectories, keyed by pstart
        path.push(format!("{:02X}", upid.pstart & 255));

        let backup_user = crate::backup::backup_user()?;

        create_path(&path, None, Some(CreateOptions::new().owner(backup_user.uid).group(backup_user.gid)))?;

        path.push(upid.to_string());

        let logger_options = FileLogOptions {
            to_stdout,
            exclusive: true,
            prefix_time: true,
            read: true,
            ..Default::default()
        };
        let logger = FileLogger::new(&path, logger_options)?;
        nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;

        let worker = Arc::new(Self {
            upid: upid.clone(),
            abort_requested: AtomicBool::new(false),
            data: Mutex::new(WorkerTaskData {
                logger,
                progress: 0.0,
                warn_count: 0,
                abort_listeners: vec![],
            }),
        });

        // scope to drop the lock again after inserting
        {
            let mut hash = WORKER_TASK_LIST.lock().unwrap();
            hash.insert(task_id, worker.clone());
            super::set_worker_count(hash.len());
        }

        update_active_workers(Some(&upid))?;

        Ok(worker)
    }
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Spawn a new tokio task/future.
|
2019-04-06 09:23:53 +00:00
|
|
|
pub fn spawn<F, T>(
|
|
|
|
worker_type: &str,
|
|
|
|
worker_id: Option<String>,
|
2020-10-23 11:33:21 +00:00
|
|
|
auth_id: Authid,
|
2019-04-06 09:23:53 +00:00
|
|
|
to_stdout: bool,
|
|
|
|
f: F,
|
|
|
|
) -> Result<String, Error>
|
2019-04-04 06:58:39 +00:00
|
|
|
where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
|
2019-08-27 07:18:39 +00:00
|
|
|
T: Send + 'static + Future<Output = Result<(), Error>>,
|
2019-04-04 06:58:39 +00:00
|
|
|
{
|
2020-10-23 11:33:21 +00:00
|
|
|
let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
|
2019-04-06 09:23:53 +00:00
|
|
|
let upid_str = worker.upid.to_string();
|
2019-08-27 07:18:39 +00:00
|
|
|
let f = f(worker.clone());
|
|
|
|
tokio::spawn(async move {
|
|
|
|
let result = f.await;
|
2019-06-25 06:12:25 +00:00
|
|
|
worker.log_result(&result);
|
2019-08-27 07:18:39 +00:00
|
|
|
});
|
2019-04-04 06:58:39 +00:00
|
|
|
|
2019-04-06 09:23:53 +00:00
|
|
|
Ok(upid_str)
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Create a new worker thread.
|
2019-04-06 09:23:53 +00:00
|
|
|
pub fn new_thread<F>(
|
|
|
|
worker_type: &str,
|
|
|
|
worker_id: Option<String>,
|
2020-10-23 11:33:21 +00:00
|
|
|
auth_id: Authid,
|
2019-04-06 09:23:53 +00:00
|
|
|
to_stdout: bool,
|
|
|
|
f: F,
|
|
|
|
) -> Result<String, Error>
|
2019-04-11 05:55:02 +00:00
|
|
|
where F: Send + UnwindSafe + 'static + FnOnce(Arc<WorkerTask>) -> Result<(), Error>
|
2019-04-04 06:58:39 +00:00
|
|
|
{
|
2020-10-23 11:33:21 +00:00
|
|
|
let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
|
2019-04-06 09:23:53 +00:00
|
|
|
let upid_str = worker.upid.to_string();
|
2019-04-04 06:58:39 +00:00
|
|
|
|
2019-12-19 06:07:39 +00:00
|
|
|
let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
|
2019-04-11 05:55:02 +00:00
|
|
|
let worker1 = worker.clone();
|
|
|
|
let result = match std::panic::catch_unwind(move || f(worker1)) {
|
|
|
|
Ok(r) => r,
|
|
|
|
Err(panic) => {
|
|
|
|
match panic.downcast::<&str>() {
|
|
|
|
Ok(panic_msg) => {
|
|
|
|
Err(format_err!("worker panicked: {}", panic_msg))
|
|
|
|
}
|
|
|
|
Err(_) => {
|
|
|
|
Err(format_err!("worker panicked: unknown type."))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-06-25 06:12:25 +00:00
|
|
|
worker.log_result(&result);
|
2019-04-04 06:58:39 +00:00
|
|
|
});
|
|
|
|
|
2019-04-06 09:23:53 +00:00
|
|
|
Ok(upid_str)
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
|
2020-08-13 08:29:13 +00:00
|
|
|
/// create state from self and a result
|
|
|
|
pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
|
2020-06-24 07:04:35 +00:00
|
|
|
let warn_count = self.data.lock().unwrap().warn_count;
|
2020-07-31 12:43:23 +00:00
|
|
|
|
2020-09-12 13:10:47 +00:00
|
|
|
let endtime = proxmox::tools::time::epoch_i64();
|
2020-08-13 12:30:17 +00:00
|
|
|
|
2019-04-05 10:37:35 +00:00
|
|
|
if let Err(err) = result {
|
2020-08-13 12:30:17 +00:00
|
|
|
TaskState::Error { message: err.to_string(), endtime }
|
2020-06-24 07:04:35 +00:00
|
|
|
} else if warn_count > 0 {
|
2020-08-13 12:30:17 +00:00
|
|
|
TaskState::Warning { count: warn_count, endtime }
|
2019-04-05 10:37:35 +00:00
|
|
|
} else {
|
2020-08-13 12:30:17 +00:00
|
|
|
TaskState::OK { endtime }
|
2019-04-05 10:37:35 +00:00
|
|
|
}
|
2020-07-31 12:43:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
    /// Log task result, remove task from running list
    ///
    /// This is the common finalization path: it writes the final
    /// state line to the task log, drops the task from the in-memory
    /// running-task list, rewrites the on-disk active-worker file and
    /// updates the exported worker-count metric.
    pub fn log_result(&self, result: &Result<(), Error>) {
        let state = self.create_state(result);
        self.log(state.result_text());

        // Remove ourselves from the running list *before* rewriting the
        // active-workers file, so the file no longer lists this task.
        WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
        // best-effort: failure to update the file is not fatal here
        let _ = update_active_workers(None);
        // NOTE(review): the list lock is taken twice on purpose, presumably
        // because update_active_workers() may lock it itself - confirm
        // before consolidating into a single lock scope.
        super::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
    }
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Log a message.
|
2019-04-04 06:58:39 +00:00
|
|
|
pub fn log<S: AsRef<str>>(&self, msg: S) {
|
|
|
|
let mut data = self.data.lock().unwrap();
|
|
|
|
data.logger.log(msg);
|
|
|
|
}
|
|
|
|
|
2020-06-24 07:04:35 +00:00
|
|
|
/// Log a message as warning.
|
|
|
|
pub fn warn<S: AsRef<str>>(&self, msg: S) {
|
|
|
|
let mut data = self.data.lock().unwrap();
|
|
|
|
data.logger.log(format!("WARN: {}", msg.as_ref()));
|
|
|
|
data.warn_count += 1;
|
|
|
|
}
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Set progress indicator
|
2019-04-04 06:58:39 +00:00
|
|
|
pub fn progress(&self, progress: f64) {
|
|
|
|
if progress >= 0.0 && progress <= 1.0 {
|
|
|
|
let mut data = self.data.lock().unwrap();
|
|
|
|
data.progress = progress;
|
|
|
|
} else {
|
|
|
|
// fixme: log!("task '{}': ignoring strange value for progress '{}'", self.upid, progress);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Request abort
|
2019-04-09 10:15:06 +00:00
|
|
|
pub fn request_abort(&self) {
|
2019-04-10 11:54:28 +00:00
|
|
|
eprintln!("set abort flag for worker {}", self.upid);
|
2021-01-29 08:22:37 +00:00
|
|
|
|
|
|
|
let prev_abort = self.abort_requested.swap(true, Ordering::SeqCst);
|
|
|
|
if !prev_abort { // log abort one time
|
|
|
|
self.log(format!("received abort request ..."));
|
|
|
|
}
|
2019-04-15 07:38:05 +00:00
|
|
|
// noitify listeners
|
|
|
|
let mut data = self.data.lock().unwrap();
|
|
|
|
loop {
|
|
|
|
match data.abort_listeners.pop() {
|
|
|
|
None => { break; },
|
|
|
|
Some(ch) => {
|
2021-03-10 15:37:09 +00:00
|
|
|
let _ = ch.send(()); // ignore errors here
|
2019-04-15 07:38:05 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
    /// Test if abort was requested.
    pub fn abort_requested(&self) -> bool {
        // SeqCst load pairs with the SeqCst swap in request_abort()
        self.abort_requested.load(Ordering::SeqCst)
    }
|
|
|
|
|
2019-04-06 07:17:25 +00:00
|
|
|
/// Fail if abort was requested.
|
2019-04-04 06:58:39 +00:00
|
|
|
pub fn fail_on_abort(&self) -> Result<(), Error> {
|
|
|
|
if self.abort_requested() {
|
2020-05-05 07:06:34 +00:00
|
|
|
bail!("abort requested - aborting task");
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
2019-04-15 07:38:05 +00:00
|
|
|
|
|
|
|
/// Get a future which resolves on task abort
|
|
|
|
pub fn abort_future(&self) -> oneshot::Receiver<()> {
|
|
|
|
let (tx, rx) = oneshot::channel::<()>();
|
|
|
|
|
|
|
|
let mut data = self.data.lock().unwrap();
|
|
|
|
if self.abort_requested() {
|
|
|
|
let _ = tx.send(());
|
|
|
|
} else {
|
|
|
|
data.abort_listeners.push(tx);
|
|
|
|
}
|
|
|
|
rx
|
|
|
|
}
|
2020-07-31 12:43:24 +00:00
|
|
|
|
|
|
|
    /// Get the worker's unique process identifier (UPID).
    pub fn upid(&self) -> &UPID {
        &self.upid
    }
|
2019-04-04 06:58:39 +00:00
|
|
|
}
|
2020-10-12 09:28:03 +00:00
|
|
|
|
2021-07-07 12:37:47 +00:00
|
|
|
impl pbs_datastore::task::TaskState for WorkerTask {
|
2020-10-12 09:28:03 +00:00
|
|
|
fn check_abort(&self) -> Result<(), Error> {
|
|
|
|
self.fail_on_abort()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
|
|
|
|
match level {
|
|
|
|
log::Level::Error => self.warn(&message.to_string()),
|
|
|
|
log::Level::Warn => self.warn(&message.to_string()),
|
|
|
|
log::Level::Info => self.log(&message.to_string()),
|
|
|
|
log::Level::Debug => self.log(&format!("DEBUG: {}", message)),
|
|
|
|
log::Level::Trace => self.log(&format!("TRACE: {}", message)),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-07-19 08:44:40 +00:00
|
|
|
|
|
|
|
/// Wait for a locally spanned worker task
|
|
|
|
///
|
|
|
|
/// Note: local workers should print logs to stdout, so there is no
|
|
|
|
/// need to fetch/display logs. We just wait for the worker to finish.
|
|
|
|
pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
|
|
|
|
|
|
|
|
let upid: UPID = upid_str.parse()?;
|
|
|
|
|
|
|
|
let sleep_duration = core::time::Duration::new(0, 100_000_000);
|
|
|
|
|
|
|
|
loop {
|
|
|
|
if worker_is_active_local(&upid) {
|
|
|
|
tokio::time::sleep(sleep_duration).await;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|