server: rustfmt
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent dc7a5b3491
commit ee0ea73500
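All changes below are mechanical formatting as produced by rustfmt: signatures, struct literals, and calls that fit within the line-width limit are collapsed onto one line, over-long calls are split one argument per line, `use` lists are reordered, and binary operators get surrounding spaces. A minimal before/after sketch of the pattern (hypothetical function, not a line from this commit):

    // Before rustfmt: hand-wrapped signature, unspaced operator.
    //
    // fn deduplication_factor(
    //     index_bytes: u64,
    //     disk_bytes: u64,
    // ) -> f64 {
    //     (index_bytes as f64)/(disk_bytes as f64)
    // }

    // After rustfmt: the short signature is collapsed, the operator is spaced.
    fn deduplication_factor(index_bytes: u64, disk_bytes: u64) -> f64 {
        (index_bytes as f64) / (disk_bytes as f64)
    }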
--- a/src/server/auth.rs
+++ b/src/server/auth.rs
@@ -4,10 +4,10 @@ use anyhow::format_err;
 
 use proxmox_router::UserInformation;
 
-use pbs_tools::ticket::{self, Ticket};
-use pbs_config::{token_shadow, CachedUserInfo};
 use pbs_api_types::{Authid, Userid};
-use proxmox_rest_server::{AuthError, extract_cookie};
+use pbs_config::{token_shadow, CachedUserInfo};
+use pbs_tools::ticket::{self, Ticket};
+use proxmox_rest_server::{extract_cookie, AuthError};
 
 use crate::auth_helpers::*;
 
@@ -53,7 +53,6 @@ pub async fn check_pbs_auth(
     headers: &http::HeaderMap,
     method: &hyper::Method,
 ) -> Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError> {
-
     // fixme: make all IO async
 
     let user_info = CachedUserInfo::new()?;
--- a/src/server/email_notifications.rs
+++ b/src/server/email_notifications.rs
@@ -1,16 +1,17 @@
 use anyhow::Error;
 use serde_json::json;
 
-use handlebars::{Handlebars, Helper, Context, RenderError, RenderContext, Output, HelperResult, TemplateError};
+use handlebars::{
+    Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, TemplateError,
+};
 
-use proxmox_sys::email::sendmail;
 use proxmox_lang::try_block;
 use proxmox_schema::ApiType;
+use proxmox_sys::email::sendmail;
 
 use pbs_api_types::{
-    User, TapeBackupJobSetup, SyncJobConfig, VerificationJobConfig,
-    APTUpdateInfo, GarbageCollectionStatus, HumanByte,
-    Userid, Notify, DatastoreNotify, DataStoreConfig,
+    APTUpdateInfo, DataStoreConfig, DatastoreNotify, GarbageCollectionStatus, HumanByte, Notify,
+    SyncJobConfig, TapeBackupJobSetup, User, Userid, VerificationJobConfig,
 };
 
 const GC_OK_TEMPLATE: &str = r###"
@@ -41,7 +42,6 @@ Please visit the web interface for further details:
 
 "###;
 
-
 const GC_ERR_TEMPLATE: &str = r###"
 
 Datastore: {{datastore}}
@@ -183,7 +183,7 @@ Please visit the web interface for further details:
 
 "###;
 
-lazy_static::lazy_static!{
+lazy_static::lazy_static! {
 
     static ref HANDLEBARS: Handlebars<'static> = {
         let mut hb = Handlebars::new();
@@ -229,18 +229,16 @@ pub struct TapeBackupJobSummary {
     pub duration: std::time::Duration,
 }
 
-fn send_job_status_mail(
-    email: &str,
-    subject: &str,
-    text: &str,
-) -> Result<(), Error> {
-
+fn send_job_status_mail(email: &str, subject: &str, text: &str) -> Result<(), Error> {
     let (config, _) = crate::config::node::config()?;
     let from = config.email_from;
 
     // Note: OX has serious problems displaying text mails,
     // so we include html as well
-    let html = format!("<html><body><pre>\n{}\n<pre>", handlebars::html_escape(text));
+    let html = format!(
+        "<html><body><pre>\n{}\n<pre>",
+        handlebars::html_escape(text)
+    );
 
     let nodename = proxmox_sys::nodename();
 
@@ -265,9 +263,8 @@ pub fn send_gc_status(
     status: &GarbageCollectionStatus,
     result: &Result<(), Error>,
 ) -> Result<(), Error> {
-
     match notify.gc {
-        None => { /* send notifications by default */ },
+        None => { /* send notifications by default */ }
         Some(notify) => {
            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
                 return Ok(());
@@ -285,7 +282,7 @@ pub fn send_gc_status(
     let text = match result {
         Ok(()) => {
             let deduplication_factor = if status.disk_bytes > 0 {
-                (status.index_data_bytes as f64)/(status.disk_bytes as f64)
+                (status.index_data_bytes as f64) / (status.disk_bytes as f64)
             } else {
                 1.0
             };
@@ -302,14 +299,8 @@ pub fn send_gc_status(
     };
 
     let subject = match result {
-        Ok(()) => format!(
-            "Garbage Collect Datastore '{}' successful",
-            datastore,
-        ),
-        Err(_) => format!(
-            "Garbage Collect Datastore '{}' failed",
-            datastore,
-        ),
+        Ok(()) => format!("Garbage Collect Datastore '{}' successful", datastore,),
+        Err(_) => format!("Garbage Collect Datastore '{}' failed", datastore,),
     };
 
     send_job_status_mail(email, &subject, &text)?;
@@ -323,7 +314,6 @@ pub fn send_verify_status(
     job: VerificationJobConfig,
     result: &Result<Vec<String>, Error>,
 ) -> Result<(), Error> {
-
     let (fqdn, port) = get_server_url();
     let mut data = json!({
         "job": job,
@@ -349,7 +339,7 @@ pub fn send_verify_status(
     };
 
     match notify.verify {
-        None => { /* send notifications by default */ },
+        None => { /* send notifications by default */ }
         Some(notify) => {
             if notify == Notify::Never || (result_is_ok && notify == Notify::Error) {
                 return Ok(());
@@ -358,14 +348,8 @@ pub fn send_verify_status(
     }
 
     let subject = match result {
-        Ok(errors) if errors.is_empty() => format!(
-            "Verify Datastore '{}' successful",
-            job.store,
-        ),
-        _ => format!(
-            "Verify Datastore '{}' failed",
-            job.store,
-        ),
+        Ok(errors) if errors.is_empty() => format!("Verify Datastore '{}' successful", job.store,),
+        _ => format!("Verify Datastore '{}' failed", job.store,),
     };
 
     send_job_status_mail(email, &subject, &text)?;
@@ -379,9 +363,8 @@ pub fn send_sync_status(
     job: &SyncJobConfig,
     result: &Result<(), Error>,
 ) -> Result<(), Error> {
-
     match notify.sync {
-        None => { /* send notifications by default */ },
+        None => { /* send notifications by default */ }
         Some(notify) => {
             if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
                 return Ok(());
@@ -397,9 +380,7 @@ pub fn send_sync_status(
     });
 
     let text = match result {
-        Ok(()) => {
-            HANDLEBARS.render("sync_ok_template", &data)?
-        }
+        Ok(()) => HANDLEBARS.render("sync_ok_template", &data)?,
         Err(err) => {
             data["error"] = err.to_string().into();
             HANDLEBARS.render("sync_err_template", &data)?
@@ -409,13 +390,11 @@ pub fn send_sync_status(
     let subject = match result {
         Ok(()) => format!(
             "Sync remote '{}' datastore '{}' successful",
-            job.remote,
-            job.remote_store,
+            job.remote, job.remote_store,
         ),
         Err(_) => format!(
             "Sync remote '{}' datastore '{}' failed",
-            job.remote,
-            job.remote_store,
+            job.remote, job.remote_store,
         ),
     };
 
@@ -431,7 +410,6 @@ pub fn send_tape_backup_status(
     result: &Result<(), Error>,
     summary: TapeBackupJobSummary,
 ) -> Result<(), Error> {
-
     let (fqdn, port) = get_server_url();
     let duration: proxmox_time::TimeSpan = summary.duration.into();
     let mut data = json!({
@@ -444,9 +422,7 @@ pub fn send_tape_backup_status(
     });
 
     let text = match result {
-        Ok(()) => {
-            HANDLEBARS.render("tape_backup_ok_template", &data)?
-        }
+        Ok(()) => HANDLEBARS.render("tape_backup_ok_template", &data)?,
         Err(err) => {
             data["error"] = err.to_string().into();
             HANDLEBARS.render("tape_backup_err_template", &data)?
@@ -454,24 +430,10 @@ pub fn send_tape_backup_status(
     };
 
     let subject = match (result, id) {
-        (Ok(()), Some(id)) => format!(
-            "Tape Backup '{}' datastore '{}' successful",
-            id,
-            job.store,
-        ),
-        (Ok(()), None) => format!(
-            "Tape Backup datastore '{}' successful",
-            job.store,
-        ),
-        (Err(_), Some(id)) => format!(
-            "Tape Backup '{}' datastore '{}' failed",
-            id,
-            job.store,
-        ),
-        (Err(_), None) => format!(
-            "Tape Backup datastore '{}' failed",
-            job.store,
-        ),
+        (Ok(()), Some(id)) => format!("Tape Backup '{}' datastore '{}' successful", id, job.store,),
+        (Ok(()), None) => format!("Tape Backup datastore '{}' successful", job.store,),
+        (Err(_), Some(id)) => format!("Tape Backup '{}' datastore '{}' failed", id, job.store,),
+        (Err(_), None) => format!("Tape Backup datastore '{}' failed", job.store,),
     };
 
     send_job_status_mail(email, &subject, &text)?;
@@ -486,13 +448,15 @@ pub fn send_load_media_email(
     to: &str,
     reason: Option<String>,
 ) -> Result<(), Error> {
-
     let subject = format!("Load Media '{}' request for drive '{}'", label_text, drive);
 
     let mut text = String::new();
 
     if let Some(reason) = reason {
-        text.push_str(&format!("The drive has the wrong or no tape inserted. Error:\n{}\n\n", reason));
+        text.push_str(&format!(
+            "The drive has the wrong or no tape inserted. Error:\n{}\n\n",
+            reason
+        ));
     }
 
     text.push_str("Please insert the requested media into the backup drive.\n\n");
@@ -504,7 +468,6 @@ pub fn send_load_media_email(
 }
 
 fn get_server_url() -> (String, usize) {
-
     // user will surely request that they can change this
 
     let nodename = proxmox_sys::nodename();
@@ -522,9 +485,7 @@ fn get_server_url() -> (String, usize) {
     (fqdn, port)
 }
 
-pub fn send_updates_available(
-    updates: &[&APTUpdateInfo],
-) -> Result<(), Error> {
+pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
     // update mails always go to the root@pam configured email..
     if let Some(email) = lookup_user_email(Userid::root_userid()) {
         let nodename = proxmox_sys::nodename();
@@ -532,11 +493,14 @@ pub fn send_updates_available(
 
         let (fqdn, port) = get_server_url();
 
-        let text = HANDLEBARS.render("package_update_template", &json!({
-            "fqdn": fqdn,
-            "port": port,
-            "updates": updates,
-        }))?;
+        let text = HANDLEBARS.render(
+            "package_update_template",
+            &json!({
+                "fqdn": fqdn,
+                "port": port,
+                "updates": updates,
+            }),
+        )?;
 
         send_job_status_mail(&email, &subject, &text)?;
     }
@@ -545,7 +509,6 @@ pub fn send_updates_available(
 
 /// Lookup users email address
 pub fn lookup_user_email(userid: &Userid) -> Option<String> {
-
     if let Ok(user_config) = pbs_config::user::cached_config() {
         if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
             return user.email;
@@ -556,13 +519,14 @@ pub fn lookup_user_email(userid: &Userid) -> Option<String> {
 }
 
 /// Lookup Datastore notify settings
-pub fn lookup_datastore_notify_settings(
-    store: &str,
-) -> (Option<String>, DatastoreNotify) {
-
+pub fn lookup_datastore_notify_settings(store: &str) -> (Option<String>, DatastoreNotify) {
     let mut email = None;
 
-    let notify = DatastoreNotify { gc: None, verify: None, sync: None };
+    let notify = DatastoreNotify {
+        gc: None,
+        verify: None,
+        sync: None,
+    };
 
     let (config, _digest) = match pbs_config::datastore::config() {
         Ok(result) => result,
@@ -597,9 +561,11 @@ fn handlebars_humam_bytes_helper(
     _: &Handlebars,
     _: &Context,
     _rc: &mut RenderContext,
-    out: &mut dyn Output
+    out: &mut dyn Output,
 ) -> HelperResult {
-    let param = h.param(0).map(|v| v.value().as_u64())
+    let param = h
+        .param(0)
+        .map(|v| v.value().as_u64())
         .flatten()
         .ok_or_else(|| RenderError::new("human-bytes: param not found"))?;
 
@@ -613,19 +579,23 @@ fn handlebars_relative_percentage_helper(
     _: &Handlebars,
     _: &Context,
     _rc: &mut RenderContext,
-    out: &mut dyn Output
+    out: &mut dyn Output,
 ) -> HelperResult {
-    let param0 = h.param(0).map(|v| v.value().as_f64())
+    let param0 = h
+        .param(0)
+        .map(|v| v.value().as_f64())
         .flatten()
         .ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
-    let param1 = h.param(1).map(|v| v.value().as_f64())
+    let param1 = h
+        .param(1)
+        .map(|v| v.value().as_f64())
         .flatten()
         .ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;
 
     if param1 == 0.0 {
         out.write("-")?;
     } else {
-        out.write(&format!("{:.2}%", (param0*100.0)/param1))?;
+        out.write(&format!("{:.2}%", (param0 * 100.0) / param1))?;
     }
     Ok(())
 }
--- a/src/server/gc_job.rs
+++ b/src/server/gc_job.rs
@@ -1,5 +1,5 @@
-use std::sync::Arc;
 use anyhow::Error;
+use std::sync::Arc;
 
 use proxmox_sys::task_log;
 
@@ -17,7 +17,6 @@ pub fn do_garbage_collection_job(
     schedule: Option<String>,
     to_stdout: bool,
 ) -> Result<String, Error> {
-
     let store = datastore.name().to_string();
 
     let (email, notify) = crate::server::lookup_datastore_notify_settings(&store);
@@ -50,13 +49,15 @@ pub fn do_garbage_collection_job(
 
             if let Some(email) = email {
                 let gc_status = datastore.last_gc_status();
-                if let Err(err) = crate::server::send_gc_status(&email, notify, &store, &gc_status, &result) {
+                if let Err(err) =
+                    crate::server::send_gc_status(&email, notify, &store, &gc_status, &result)
+                {
                     eprintln!("send gc notification failed: {}", err);
                 }
             }
 
             result
-        }
+        },
     )?;
 
     Ok(upid_str)
--- a/src/server/jobstate.rs
+++ b/src/server/jobstate.rs
@@ -42,15 +42,13 @@ use std::path::{Path, PathBuf};
 use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 
-use proxmox_sys::fs::{
-    create_path, file_read_optional_string, replace_file, CreateOptions,
-};
+use proxmox_sys::fs::{create_path, file_read_optional_string, replace_file, CreateOptions};
 
 use proxmox_time::CalendarEvent;
 
+use pbs_api_types::{JobScheduleStatus, UPID};
 use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
 use pbs_config::{open_backup_lockfile, BackupLockGuard};
-use pbs_api_types::{UPID, JobScheduleStatus};
 
 use proxmox_rest_server::{upid_read_status, worker_is_active_local, TaskState};
 
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -37,16 +37,18 @@ pub mod pull;
 pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
     let proxy_pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
     let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid);
-    let _: Value = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
-        .await?;
+    let _: Value =
+        proxmox_rest_server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
+            .await?;
     Ok(())
 }
 
 pub(crate) async fn notify_datastore_removed() -> Result<(), Error> {
     let proxy_pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
     let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid);
-    let _: Value = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
-        .await?;
+    let _: Value =
+        proxmox_rest_server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
+            .await?;
     Ok(())
 }
 
@@ -68,7 +70,11 @@ pub fn create_state_dir() -> Result<(), Error> {
     let opts = CreateOptions::new()
         .owner(backup_user.uid)
         .group(backup_user.gid);
-    create_path(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M!(), None, Some(opts))?;
+    create_path(
+        pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M!(),
+        None,
+        Some(opts),
+    )?;
     Ok(())
 }
 
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -4,11 +4,11 @@ use anyhow::Error;
 
 use proxmox_sys::{task_log, task_warn};
 
+use pbs_api_types::{Authid, Operation, PruneOptions, PRIV_DATASTORE_MODIFY};
+use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupInfo;
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::DataStore;
-use pbs_api_types::{Authid, Operation, PRIV_DATASTORE_MODIFY, PruneOptions};
-use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;
 
 use crate::server::jobstate::Job;
@@ -113,7 +113,14 @@ pub fn do_prune_job(
                 task_log!(worker, "task triggered by schedule '{}'", event_str);
             }
 
-            let result = prune_datastore(worker.clone(), auth_id, prune_options, &store, datastore, false);
+            let result = prune_datastore(
+                worker.clone(),
+                auth_id,
+                prune_options,
+                &store,
+                datastore,
+                false,
+            );
 
             let status = worker.create_state(&result);
 
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -8,27 +8,28 @@ use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
 
 use anyhow::{bail, format_err, Error};
-use serde_json::json;
 use http::StatusCode;
+use serde_json::json;
 
 use proxmox_router::HttpError;
 use proxmox_sys::task_log;
 
 use pbs_api_types::{
-    Authid, GroupFilter, GroupListItem, RateLimitConfig, Remote,
-    Operation, SnapshotListItem,
+    Authid, GroupFilter, GroupListItem, Operation, RateLimitConfig, Remote, SnapshotListItem,
 };
 
-use pbs_datastore::{BackupDir, BackupInfo, BackupGroup, DataStore, StoreProgress};
+use pbs_client::{
+    BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader,
+};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{
-    CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, FileInfo, archive_type
+    archive_type, ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
+use pbs_datastore::{BackupDir, BackupGroup, BackupInfo, DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
-use pbs_client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader};
 use proxmox_rest_server::WorkerTask;
 
 use crate::tools::parallel_handler::ParallelHandler;
@@ -71,7 +72,15 @@ impl PullParameters {
             remote_store.to_string(),
         );
 
-        Ok(Self { remote, source, store, owner, remove_vanished, group_filter, limit })
+        Ok(Self {
+            remote,
+            source,
+            store,
+            owner,
+            remove_vanished,
+            group_filter,
+            limit,
+        })
     }
 
     pub async fn client(&self) -> Result<HttpClient, Error> {
@@ -163,7 +172,7 @@ async fn pull_index_chunks<I: IndexFile>(
     let bytes = bytes.load(Ordering::SeqCst);
 
     task_log!(
         worker,
         "downloaded {} bytes ({:.2} MiB/s)",
         bytes,
         (bytes as f64) / (1024.0 * 1024.0 * elapsed)
@@ -495,7 +504,11 @@ pub async fn pull_snapshot_from(
             downloaded_chunks,
         )
         .await?;
-        task_log!(worker, "re-sync snapshot {:?} done", snapshot.relative_path());
+        task_log!(
+            worker,
+            "re-sync snapshot {:?} done",
+            snapshot.relative_path()
+        );
     }
 
     Ok(())
@@ -524,13 +537,11 @@ impl SkipInfo {
         match self.count {
             0 => Ok(String::new()),
             1 => Ok(proxmox_time::epoch_to_rfc3339_utc(self.oldest)?),
-            _ => {
-                Ok(format!(
-                    "{} .. {}",
-                    proxmox_time::epoch_to_rfc3339_utc(self.oldest)?,
-                    proxmox_time::epoch_to_rfc3339_utc(self.newest)?,
-                ))
-            }
+            _ => Ok(format!(
+                "{} .. {}",
+                proxmox_time::epoch_to_rfc3339_utc(self.oldest)?,
+                proxmox_time::epoch_to_rfc3339_utc(self.newest)?,
+            )),
         }
     }
 }
@@ -553,7 +564,10 @@ pub async fn pull_group(
     group: &BackupGroup,
     progress: &mut StoreProgress,
 ) -> Result<(), Error> {
-    let path = format!("api2/json/admin/datastore/{}/snapshots", params.source.store());
+    let path = format!(
+        "api2/json/admin/datastore/{}/snapshots",
+        params.source.store()
+    );
 
     let args = json!({
         "backup-type": group.backup_type(),
@@ -589,7 +603,11 @@ pub async fn pull_group(
 
         // in-progress backups can't be synced
         if item.size.is_none() {
-            task_log!(worker, "skipping snapshot {} - in-progress backup", snapshot);
+            task_log!(
+                worker,
+                "skipping snapshot {} - in-progress backup",
+                snapshot
+            );
             continue;
         }
 
@@ -607,8 +625,9 @@ pub async fn pull_group(
         // get updated auth_info (new tickets)
         let auth_info = client.login().await?;
 
-        let options = HttpClientOptions::new_non_interactive(auth_info.ticket.clone(), fingerprint.clone())
-            .rate_limit(params.limit.clone());
+        let options =
+            HttpClientOptions::new_non_interactive(auth_info.ticket.clone(), fingerprint.clone())
+                .rate_limit(params.limit.clone());
 
         let new_client = HttpClient::new(
             params.source.host(),
@@ -658,7 +677,11 @@ pub async fn pull_group(
                 );
                 continue;
             }
-            task_log!(worker, "delete vanished snapshot {:?}", info.backup_dir.relative_path());
+            task_log!(
+                worker,
+                "delete vanished snapshot {:?}",
+                info.backup_dir.relative_path()
+            );
             params.store.remove_backup_dir(&info.backup_dir, false)?;
         }
     }
@@ -698,25 +721,26 @@ pub async fn pull_store(
     });
 
     let apply_filters = |group: &BackupGroup, filters: &[GroupFilter]| -> bool {
-        filters
-            .iter()
-            .any(|filter| group.matches(filter))
+        filters.iter().any(|filter| group.matches(filter))
     };
 
-    let list:Vec<BackupGroup> = list
+    let list: Vec<BackupGroup> = list
         .into_iter()
         .map(|item| BackupGroup::new(item.backup_type, item.backup_id))
         .collect();
 
     let list = if let Some(ref group_filter) = &params.group_filter {
         let unfiltered_count = list.len();
-        let list:Vec<BackupGroup> = list
+        let list: Vec<BackupGroup> = list
             .into_iter()
-            .filter(|group| {
-                apply_filters(group, group_filter)
-            })
+            .filter(|group| apply_filters(group, group_filter))
             .collect();
-        task_log!(worker, "found {} groups to sync (out of {} total)", list.len(), unfiltered_count);
+        task_log!(
+            worker,
+            "found {} groups to sync (out of {} total)",
+            list.len(),
+            unfiltered_count
+        );
         list
     } else {
         task_log!(worker, "found {} groups to sync", total_count);
@@ -737,13 +761,17 @@ pub async fn pull_store(
         progress.done_snapshots = 0;
         progress.group_snapshots = 0;
 
-        let (owner, _lock_guard) = match params.store.create_locked_backup_group(&group, &params.owner) {
+        let (owner, _lock_guard) = match params
+            .store
+            .create_locked_backup_group(&group, &params.owner)
+        {
             Ok(result) => result,
             Err(err) => {
                 task_log!(
                     worker,
                     "sync group {} failed - group lock failed: {}",
-                    &group, err
+                    &group,
+                    err
                 );
                 errors = true; // do not stop here, instead continue
                 continue;
@@ -756,23 +784,13 @@ pub async fn pull_store(
             task_log!(
                 worker,
                 "sync group {} failed - owner check failed ({} != {})",
-                &group, params.owner, owner
+                &group,
+                params.owner,
+                owner
             );
             errors = true; // do not stop here, instead continue
-        } else if let Err(err) = pull_group(
-            worker,
-            client,
-            params,
-            &group,
-            &mut progress,
-        )
-        .await
-        {
-            task_log!(
-                worker,
-                "sync group {} failed - {}",
-                &group, err,
-            );
+        } else if let Err(err) = pull_group(worker, client, params, &group, &mut progress).await {
+            task_log!(worker, "sync group {} failed - {}", &group, err,);
             errors = true; // do not stop here, instead continue
         }
     }
@@ -796,10 +814,14 @@ pub async fn pull_store(
                 local_group.backup_id()
             );
             match params.store.remove_backup_group(&local_group) {
-                Ok(true) => {},
+                Ok(true) => {}
                 Ok(false) => {
-                    task_log!(worker, "kept some protected snapshots of group '{}'", local_group);
-                },
+                    task_log!(
+                        worker,
+                        "kept some protected snapshots of group '{}'",
+                        local_group
+                    );
+                }
                 Err(err) => {
                     task_log!(worker, "{}", err);
                     errors = true;
--- a/src/server/report.rs
+++ b/src/server/report.rs
@@ -20,7 +20,7 @@ fn files() -> Vec<&'static str> {
 
 fn commands() -> Vec<(&'static str, Vec<&'static str>)> {
     vec![
         // ("<command>", vec![<arg [, arg]>])
         ("proxmox-backup-manager", vec!["versions", "--verbose"]),
         ("proxmox-backup-manager", vec!["subscription", "get"]),
         ("df", vec!["-h"]),
@@ -35,20 +35,18 @@ fn commands() -> Vec<(&'static str, Vec<&'static str>)> {
 type FunctionMapping = (&'static str, fn() -> String);
 
 fn function_calls() -> Vec<FunctionMapping> {
-    vec![
-        ("Datastores", || {
-            let config = match pbs_config::datastore::config() {
-                Ok((config, _digest)) => config,
-                _ => return String::from("could not read datastore config"),
-            };
+    vec![("Datastores", || {
+        let config = match pbs_config::datastore::config() {
+            Ok((config, _digest)) => config,
+            _ => return String::from("could not read datastore config"),
+        };
 
-            let mut list = Vec::new();
-            for store in config.sections.keys() {
-                list.push(store.as_str());
-            }
-            list.join(", ")
-        })
-    ]
+        let mut list = Vec::new();
+        for store in config.sections.keys() {
+            list.push(store.as_str());
+        }
+        list.join(", ")
+    })]
 }
 
 pub fn generate_report() -> String {
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -1,16 +1,13 @@
 use anyhow::{format_err, Error};
 
-use proxmox_sys::task_log;
 use pbs_api_types::{Authid, Operation, VerificationJobConfig};
-use proxmox_rest_server::WorkerTask;
 use pbs_datastore::DataStore;
+use proxmox_rest_server::WorkerTask;
+use proxmox_sys::task_log;
 
 use crate::{
+    backup::{verify_all_backups, verify_filter},
     server::jobstate::Job,
-    backup::{
-        verify_filter,
-        verify_all_backups,
-    },
 };
 
 /// Runs a verification job.
@@ -21,7 +18,6 @@ pub fn do_verification_job(
     schedule: Option<String>,
     to_stdout: bool,
 ) -> Result<String, Error> {
-
     let datastore = DataStore::lookup_datastore(&verification_job.store, Some(Operation::Read))?;
 
     let outdated_after = verification_job.outdated_after;
@@ -29,9 +25,7 @@ pub fn do_verification_job(
 
     let (email, notify) = crate::server::lookup_datastore_notify_settings(&verification_job.store);
 
-    let job_id = format!("{}:{}",
-        &verification_job.store,
-        job.jobname());
+    let job_id = format!("{}:{}", &verification_job.store, job.jobname());
     let worker_type = job.jobtype().to_string();
     let upid_str = WorkerTask::new_thread(
         &worker_type,
@@ -41,9 +35,9 @@ pub fn do_verification_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker,"Starting datastore verify job '{}'", job_id);
+            task_log!(worker, "Starting datastore verify job '{}'", job_id);
             if let Some(event_str) = schedule {
-                task_log!(worker,"task triggered by schedule '{}'", event_str);
+                task_log!(worker, "task triggered by schedule '{}'", event_str);
             }
 
             let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
@@ -63,8 +57,10 @@ pub fn do_verification_job(
                     task_log!(worker, "\t{}", dir);
                 }
 
-                Err(format_err!("verification failed - please check the log for details"))
-            },
+                Err(format_err!(
+                    "verification failed - please check the log for details"
+                ))
+            }
             Err(_) => Err(format_err!("verification failed - job aborted")),
         };
 
@@ -79,7 +75,9 @@ pub fn do_verification_job(
     }
 
     if let Some(email) = email {
-        if let Err(err) = crate::server::send_verify_status(&email, notify, verification_job, &result) {
+        if let Err(err) =
+            crate::server::send_verify_status(&email, notify, verification_job, &result)
+        {
             eprintln!("send verify notification failed: {}", err);
         }
     }