Compare commits

...

22 Commits

Author SHA1 Message Date
9d79cec4d5 bump version to 0.9.6-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:13:04 +01:00
4935681cf4 ui: sync jobs: add tooltip for remove vanished
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:07:07 +01:00
669fa672d9 ui: sync jobs: reorder fields
Group local ones together on the left side, and source + schedule
on the right side.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:05:48 +01:00
a797583535 ui: sync jobs: fix originalValue of owner and improve label
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:42 +01:00
54ed1b2a71 ui: sync jobs: only set default schedule when creating new jobs
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 19:04:06 +01:00
8e12e86f0b ui: add shell panel under administration
Some users prefer an inline console; we still have the pop-out
console in 'Administration'.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 18:16:49 +01:00
fe7bdc9d29 proxy: also rotate auth.log file
No need to trigger a re-open here, as we always re-open that file.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
546b6a23df proxy: logrotate: do not serialize sending async log-reopen commands
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
4fdf13f95f api: factor out auth logger and use for all API authentication failures
We have information here that is not available in the access log,
especially if the /api2/extjs formatter is used, which encapsulates
errors in a 200 response.

So keep the auth log for now, but extend its use from ticket-creation
calls to all authentication failures for API calls; this ensures one
can also fail2ban token-based access.

Do that logging in a central place, which makes it simple but means
that we do not have the user ID information available to include in
the log.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:26:34 +01:00
385681c9ab worker task: fix passing upid to send command
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
be99df2767 log rotate: only add .zst to new file after second rotation
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 17:16:55 +01:00
30200b5c4a ui: fix task description for log rotate
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-11-04 14:20:44 +01:00
f47c1d3a2f proxy: use new datastore notify settings 2020-11-04 11:54:29 +01:00
6e545d0058 config: allow to configure who receives job notify emails 2020-11-04 11:54:29 +01:00
84006f98b2 ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-11-04 11:39:52 +01:00
42ca9e918a sync: improve log format 2020-11-04 09:10:56 +01:00
ea93bea7bf proxy: log if there are too many open connections 2020-11-04 08:49:35 +01:00
0081903f7c fix bug #2870: use updated tickets 2020-11-04 08:20:36 +01:00
c53797f627 ui: set default deduplication factor to 1.0 2020-11-04 07:12:55 +01:00
e1d367df47 proxy: use env PROXMOX_DEBUG to enable/disable debug output
We only print early connection errors when this env var is set.
2020-11-04 06:55:57 +01:00
71f413cd27 cleanup: use Arc to count open connections 2020-11-04 06:35:44 +01:00
48aa2b93b7 fix #3106: correctly queue incoming connections 2020-11-04 06:24:42 +01:00
24 changed files with 335 additions and 85 deletions

View File

@@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.9.5"
version = "0.9.6"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"

debian/changelog (vendored, 23 changes)
View File

@@ -1,3 +1,26 @@
rust-proxmox-backup (0.9.6-1) unstable; urgency=medium

  * fix #3106: improve queueing new incoming connections

  * fix #2870: sync: ensure an updated ticket is used, if available

  * proxy: log if there are too many open connections

  * ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation

  * datastore config: allow to configure who receives job notify emails

  * ui: fix task description for log rotate

  * proxy: also rotate auth.log file

  * ui: add shell panel under administration

  * ui: sync jobs: only set default schedule when creating new jobs and some
    other small fixes

 -- Proxmox Support Team <support@proxmox.com>  Wed, 04 Nov 2020 19:12:57 +0100

rust-proxmox-backup (0.9.5-1) unstable; urgency=medium

  * ui: user menu: allow one to change the language while staying logged in

debian/postinst (vendored, 3 changes)
View File

@@ -25,6 +25,9 @@ case "$1" in
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
fi
fi
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
fi
fi
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then

View File

@@ -12,7 +12,6 @@ use proxmox::{http_err, list_subdirs_api_method};
use crate::tools::ticket::{self, Empty, Ticket};
use crate::auth_helpers::*;
use crate::api2::types::*;
use crate::tools::{FileLogOptions, FileLogger};
use crate::config::acl as acl_config;
use crate::config::acl::{PRIVILEGES, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
@@ -144,20 +143,13 @@ fn create_ticket(
port: Option<u16>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let logger_options = FileLogOptions {
append: true,
prefix_time: true,
..Default::default()
};
let mut auth_log = FileLogger::new("/var/log/proxmox-backup/api/auth.log", logger_options)?;
match authenticate_user(&username, &password, path, privs, port) {
Ok(true) => {
let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
auth_log.log(format!("successful auth for user '{}'", username));
crate::server::rest::auth_logger()?.log(format!("successful auth for user '{}'", username));
Ok(json!({
"username": username,
@@ -180,7 +172,7 @@
username,
err.to_string()
);
auth_log.log(&msg);
crate::server::rest::auth_logger()?.log(&msg);
log::error!("{}", msg);
Err(http_err!(UNAUTHORIZED, "permission check failed."))

View File

@@ -68,6 +68,14 @@ pub fn list_datastores(
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
type: Notify,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@@ -187,6 +195,10 @@ pub enum DeletableProperty {
keep_monthly,
/// Delete the keep-yearly property
keep_yearly,
/// Delete the notify-user property
notify_user,
/// Delete the notify property
notify,
}
#[api(
@@ -200,6 +212,14 @@ pub enum DeletableProperty {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
type: Notify,
},
"gc-schedule": {
optional: true,
schema: GC_SCHEDULE_SCHEMA,
@@ -262,6 +282,8 @@ pub fn update_datastore(
keep_weekly: Option<u64>,
keep_monthly: Option<u64>,
keep_yearly: Option<u64>,
notify: Option<Notify>,
notify_user: Option<Userid>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
@@ -290,6 +312,8 @@ pub fn update_datastore(
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
DeletableProperty::notify => { data.notify = None; },
DeletableProperty::notify_user => { data.notify_user = None; },
}
}
}
@@ -322,6 +346,9 @@ pub fn update_datastore(
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
if notify.is_some() { data.notify = notify; }
if notify_user.is_some() { data.notify_user = notify_user; }
config.set_data(&name, "datastore", &data)?;
datastore::save_config(&config)?;

View File

@@ -75,7 +75,7 @@ pub fn do_sync_job(
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let email = crate::server::lookup_user_email(auth_id.user());
let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
let upid_str = WorkerTask::spawn(
&worker_type,
@@ -126,7 +126,7 @@
}
if let Some(email) = email {
if let Err(err) = crate::server::send_sync_status(&email, &sync_job2, &result) {
if let Err(err) = crate::server::send_sync_status(&email, notify, &sync_job2, &result) {
eprintln!("send sync notification failed: {}", err);
}
}

View File

@@ -1154,3 +1154,16 @@ pub struct APTUpdateInfo {
/// URL under which the package's changelog can be retrieved
pub change_log_url: String,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
/// Never send notification
Never,
/// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
}
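All three job-mail helpers in this changeset gate on this enum before rendering anything. A minimal sketch of that gate, assuming the Notify type above is in scope (this is not the literal shipped code, but mirrors the check in the send_*_status hunks further down):

    use anyhow::Error;

    /// Sketch of the check at the top of send_gc_status,
    /// send_verify_status and send_sync_status in this diff.
    fn should_notify(notify: Notify, result: &Result<(), Error>) -> bool {
        // Never suppresses all mails; Error suppresses success mails.
        !(notify == Notify::Never || (result.is_ok() && notify == Notify::Error))
    }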

View File

@@ -1,4 +1,4 @@
use std::sync::{Arc};
use std::sync::Arc;
use std::path::{Path, PathBuf};
use std::os::unix::io::AsRawFd;
@@ -74,6 +74,10 @@ async fn run() -> Result<(), Error> {
bail!("unable to inititialize syslog - {}", err);
}
// Note: To debug early connection errors use
// PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
let debug = std::env::var("PROXMOX_DEBUG").is_ok();
let _ = public_auth_key(); // load with lazy_static
let _ = csrf_secret(); // load with lazy_static
@@ -116,25 +120,12 @@
let server = daemon::create_daemon(
([0,0,0,0,0,0,0,0], 8007).into(),
|listener, ready| {
let connections = proxmox_backup::tools::async_io::StaticIncoming::from(listener)
.map_err(Error::from)
.try_filter_map(move |(sock, _addr)| {
let acceptor = Arc::clone(&acceptor);
async move {
sock.set_nodelay(true).unwrap();
let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
Ok(tokio_openssl::accept(&acceptor, sock)
.await
.ok() // handshake errors aren't fatal, so return None to filter
)
}
});
let connections = proxmox_backup::tools::async_io::HyperAccept(connections);
let connections = accept_connections(listener, acceptor, debug);
let connections = hyper::server::accept::from_stream(connections);
Ok(ready
.and_then(|_| hyper::Server::builder(connections)
.and_then(|_| hyper::Server::builder(connections)
.serve(rest_server)
.with_graceful_shutdown(server::shutdown_future())
.map_err(Error::from)
@@ -170,6 +161,72 @@ async fn run() -> Result<(), Error> {
Ok(())
}
fn accept_connections(
mut listener: tokio::net::TcpListener,
acceptor: Arc<openssl::ssl::SslAcceptor>,
debug: bool,
) -> tokio::sync::mpsc::Receiver<Result<tokio_openssl::SslStream<tokio::net::TcpStream>, Error>> {
const MAX_PENDING_ACCEPTS: usize = 1024;
let (sender, receiver) = tokio::sync::mpsc::channel(MAX_PENDING_ACCEPTS);
let accept_counter = Arc::new(());
tokio::spawn(async move {
loop {
match listener.accept().await {
Err(err) => {
eprintln!("error accepting tcp connection: {}", err);
}
Ok((sock, _addr)) => {
sock.set_nodelay(true).unwrap();
let _ = set_tcp_keepalive(sock.as_raw_fd(), PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
let acceptor = Arc::clone(&acceptor);
let mut sender = sender.clone();
if Arc::strong_count(&accept_counter) > MAX_PENDING_ACCEPTS {
eprintln!("connection rejected - to many open connections");
continue;
}
let accept_counter = accept_counter.clone();
tokio::spawn(async move {
let accept_future = tokio::time::timeout(
Duration::new(10, 0), tokio_openssl::accept(&acceptor, sock));
let result = accept_future.await;
match result {
Ok(Ok(connection)) => {
if let Err(_) = sender.send(Ok(connection)).await {
if debug {
eprintln!("detect closed connection channel");
}
}
}
Ok(Err(err)) => {
if debug {
eprintln!("https handshake failed - {}", err);
}
}
Err(_) => {
if debug {
eprintln!("https handshake timeout");
}
}
}
drop(accept_counter); // decrease reference count
});
}
}
}
});
receiver
}
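Two details carry the fix here: the bounded mpsc channel queues up to MAX_PENDING_ACCEPTS accepted TLS streams for hyper (fix #3106), and Arc::strong_count(&accept_counter) doubles as the open-connection counter, since every spawned handshake task holds a clone until it finishes. A stripped-down sketch of that counting idiom, with plain threads standing in for the tokio tasks:

    use std::sync::Arc;

    const MAX_PENDING_ACCEPTS: usize = 1024;

    // Each in-flight task holds a clone, so strong_count tracks the
    // number of open connections (plus the original handle).
    fn try_accept(counter: &Arc<()>) -> bool {
        if Arc::strong_count(counter) > MAX_PENDING_ACCEPTS {
            eprintln!("connection rejected - too many open connections");
            return false;
        }
        let guard = Arc::clone(counter);
        std::thread::spawn(move || {
            // ... do the handshake / serve the connection ...
            drop(guard); // dropping the clone decrements the count
        });
        true
    }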
fn start_stat_generator() {
let abort_future = server::shutdown_future();
let future = Box::pin(run_stat_generator());
@@ -524,11 +581,18 @@ async fn schedule_task_log_rotate() {
let mut logrotate = LogRotate::new(buildcfg::API_ACCESS_LOG_FN, true)
.ok_or_else(|| format_err!("could not get API access log file names"))?;
let has_rotated = logrotate.rotate(max_size, None, Some(max_files))?;
if has_rotated {
if logrotate.rotate(max_size, None, Some(max_files))? {
println!("rotated access log, telling daemons to re-open log file");
proxmox_backup::tools::runtime::block_on(command_reopen_logfiles())?;
worker.log(format!("API access log was rotated"));
} else {
worker.log(format!("API access log was not rotated"));
}
let mut logrotate = LogRotate::new(buildcfg::API_AUTH_LOG_FN, true)
.ok_or_else(|| format_err!("could not get API auth log file names"))?;
if logrotate.rotate(max_size, None, Some(max_files))? {
worker.log(format!("API access log was rotated"));
} else {
worker.log(format!("API access log was not rotated"));
@@ -555,16 +619,22 @@ async fn command_reopen_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
let sock = server::our_ctrl_sock();
server::send_command(sock, serde_json::json!({
let f1 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
})).await?;
}));
let pid = server::read_pid(buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
let sock = server::ctrl_sock_from_pid(pid);
server::send_command(sock, serde_json::json!({
let f2 = server::send_command(sock, serde_json::json!({
"command": "api-access-log-reopen",
})).await?;
Ok(())
}));
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
(Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
(Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
_ => Ok(()),
}
}
async fn run_stat_generator() {
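The point of the change above: the two send_command futures are now created first and then driven concurrently by futures::join!, so a stuck proxy socket no longer delays the re-open command for the API daemon. The same shape in miniature, with a hypothetical send_one standing in for server::send_command:

    use anyhow::{format_err, Error};

    // Hypothetical stand-in for server::send_command.
    async fn send_one(target: &str) -> Result<(), Error> {
        println!("api-access-log-reopen sent to {}", target);
        Ok(())
    }

    async fn reopen_both() -> Result<(), Error> {
        // Futures are lazy: neither runs until join! polls both together.
        let f1 = send_one("proxy");
        let f2 = send_one("api");
        match futures::join!(f1, f2) {
            (Err(e1), Err(e2)) => Err(format_err!("proxy: {}; api: {}", e1, e2)),
            (Err(e), Ok(())) | (Ok(()), Err(e)) => Err(e),
            _ => Ok(()),
        }
    }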

View File

@@ -16,9 +16,14 @@ pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
/// namespaced directory for persistent logging
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
/// logfile for all API requests handled by the proxy and privileged API daemons
/// logfile for all API requests handled by the proxy and privileged API daemons. Note that not all
/// failed logins can be logged here with full information, use the auth log for that.
pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log");
/// logfile for any failed authentication, via ticket or via token, and new successful ticket
/// creations. This file can be useful for fail2ban.
pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log");
/// the PID filename for the unprivileged proxy daemon
pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/proxy.pid");

View File

@@ -405,6 +405,9 @@ impl HttpClient {
///
/// Login is done on demand, so this is only required if you need
/// access to authentication data in 'AuthInfo'.
///
/// Note: tickets are periodically renewed, so one can use this
/// to query a changed ticket.
pub async fn login(&self) -> Result<AuthInfo, Error> {
if let Some(future) = &self.first_auth {
future.listen().await?;

View File

@@ -103,7 +103,7 @@ async fn pull_index_chunks<I: IndexFile>(
let bytes = bytes.load(Ordering::SeqCst);
worker.log(format!("downloaded {} bytes ({} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
worker.log(format!("downloaded {} bytes ({:.2} MiB/s)", bytes, (bytes as f64)/(1024.0*1024.0*elapsed)));
Ok(())
}
@@ -410,7 +410,8 @@ pub async fn pull_group(
list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
let auth_info = client.login().await?;
client.login().await?; // make sure auth is complete
let fingerprint = client.fingerprint();
let last_sync = tgt_store.last_successful_backup(group)?;
@@ -447,6 +448,9 @@
if last_sync_time > backup_time { continue; }
}
// get updated auth_info (new tickets)
let auth_info = client.login().await?;
let options = HttpClientOptions::new()
.password(Some(auth_info.ticket.clone()))
.fingerprint(fingerprint.clone());

View File

@@ -32,6 +32,14 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
path: {
schema: DIR_NAME_SCHEMA,
},
"notify-user": {
optional: true,
type: Userid,
},
"notify": {
optional: true,
type: Notify,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -101,6 +109,12 @@ pub struct DataStoreConfig {
/// If enabled, all backups will be verified right after completion.
#[serde(skip_serializing_if="Option::is_none")]
pub verify_new: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if="Option::is_none")]
pub notify_user: Option<Userid>,
/// Send notification only for job errors
#[serde(skip_serializing_if="Option::is_none")]
pub notify: Option<Notify>,
}
fn init() -> SectionConfig {

View File

@@ -6,12 +6,14 @@ use handlebars::{Handlebars, Helper, Context, RenderError, RenderContext, Output
use proxmox::tools::email::sendmail;
use crate::{
config::datastore::DataStoreConfig,
config::verify::VerificationJobConfig,
config::sync::SyncJobConfig,
api2::types::{
APTUpdateInfo,
GarbageCollectionStatus,
Userid,
Notify,
},
tools::format::HumanByte,
};
@@ -188,11 +190,16 @@ fn send_job_status_mail(
pub fn send_gc_status(
email: &str,
notify: Notify,
datastore: &str,
status: &GarbageCollectionStatus,
result: &Result<(), Error>,
) -> Result<(), Error> {
if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
return Ok(());
}
let (fqdn, port) = get_server_url();
let mut data = json!({
"datastore": datastore,
@@ -237,10 +244,15 @@ pub fn send_gc_status(
pub fn send_verify_status(
email: &str,
notify: Notify,
job: VerificationJobConfig,
result: &Result<Vec<String>, Error>,
) -> Result<(), Error> {
if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
return Ok(());
}
let (fqdn, port) = get_server_url();
let mut data = json!({
"job": job,
@@ -280,10 +292,15 @@ pub fn send_verify_status(
pub fn send_sync_status(
email: &str,
notify: Notify,
job: &SyncJobConfig,
result: &Result<(), Error>,
) -> Result<(), Error> {
if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
return Ok(());
}
let (fqdn, port) = get_server_url();
let mut data = json!({
"job": job,
@@ -362,7 +379,7 @@ pub fn send_updates_available(
/// Lookup a user's email address
///
/// For "backup@pam", this returns the address from "root@pam".
pub fn lookup_user_email(userid: &Userid) -> Option<String> {
fn lookup_user_email(userid: &Userid) -> Option<String> {
use crate::config::user::{self, User};
@@ -379,6 +396,36 @@ pub fn lookup_user_email(userid: &Userid) -> Option<String> {
None
}
/// Lookup Datastore notify settings
pub fn lookup_datastore_notify_settings(
store: &str,
) -> (Option<String>, Notify) {
let mut notify = Notify::Always;
let mut email = None;
let (config, _digest) = match crate::config::datastore::config() {
Ok(result) => result,
Err(_) => return (email, notify),
};
let config: DataStoreConfig = match config.lookup("datastore", store) {
Ok(result) => result,
Err(_) => return (email, notify),
};
email = match config.notify_user {
Some(ref userid) => lookup_user_email(userid),
None => lookup_user_email(Userid::backup_userid()),
};
if let Some(value) = config.notify {
notify = value;
}
(email, notify)
}
// Handlebars helper functions
fn handlebars_humam_bytes_helper(

View File

@@ -17,10 +17,10 @@ pub fn do_garbage_collection_job(
to_stdout: bool,
) -> Result<String, Error> {
let email = crate::server::lookup_user_email(auth_id.user());
let store = datastore.name().to_string();
let (email, notify) = crate::server::lookup_datastore_notify_settings(&store);
let worker_type = job.jobtype().to_string();
let upid_str = WorkerTask::new_thread(
&worker_type,
@@ -50,7 +50,7 @@ pub fn do_garbage_collection_job(
if let Some(email) = email {
let gc_status = datastore.last_gc_status();
if let Err(err) = crate::server::send_gc_status(&email, &store, &gc_status, &result) {
if let Err(err) = crate::server::send_gc_status(&email, notify, &store, &gc_status, &result) {
eprintln!("send gc notification failed: {}", err);
}
}

View File

@@ -164,6 +164,15 @@ fn log_response(
));
}
}
pub fn auth_logger() -> Result<FileLogger, Error> {
let logger_options = tools::FileLogOptions {
append: true,
prefix_time: true,
owned_by_backup: true,
..Default::default()
};
FileLogger::new(crate::buildcfg::API_AUTH_LOG_FN, logger_options)
}
fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
lazy_static! {
@@ -687,6 +696,10 @@ async fn handle_request(
match auth_result {
Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
Err(err) => {
let peer = peer.ip();
auth_logger()?
.log(format!("authentication failure; rhost={} msg={}", peer, err));
// always delay unauthorized calls by 3 seconds (from start of request)
let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
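Every failure now lands in auth.log with an rhost= field, which is exactly what fail2ban needs to match on. A hypothetical filter/jail pair (the file names and maxretry value are illustrative, not shipped defaults):

    # /etc/fail2ban/filter.d/proxmox-backup.conf (hypothetical)
    [Definition]
    failregex = authentication failure; rhost=<HOST>
    ignoreregex =

    # /etc/fail2ban/jail.d/proxmox-backup.local (hypothetical)
    [proxmox-backup]
    enabled = true
    port = 8007
    filter = proxmox-backup
    logpath = /var/log/proxmox-backup/api/auth.log
    maxretry = 3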

View File

@@ -48,7 +48,7 @@ pub fn do_verification_job(
}
};
let email = crate::server::lookup_user_email(auth_id.user());
let (email, notify) = crate::server::lookup_datastore_notify_settings(&verification_job.store);
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
@@ -84,7 +84,7 @@
}
if let Some(email) = email {
if let Err(err) = crate::server::send_verify_status(&email, verification_job, &result) {
if let Err(err) = crate::server::send_verify_status(&email, notify, verification_job, &result) {
eprintln!("send verify notification failed: {}", err);
}
}

View File

@@ -55,7 +55,9 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
let sock = server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-status",
"upid": upid.to_string(),
"args": {
"upid": upid.to_string(),
},
});
let status = super::send_command(sock, cmd).await?;
@@ -127,7 +129,9 @@ pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
let sock = server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-abort",
"upid": upid.to_string(),
"args": {
"upid": upid.to_string(),
},
});
super::send_command(sock, cmd).map_ok(|_| ()).await
}

View File

@@ -92,15 +92,15 @@ impl LogRotate {
if filenames.is_empty() {
return Ok(()); // no file means nothing to rotate
}
let count = filenames.len() + 1;
let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
next_filename.push(format!(".{}", filenames.len()));
if self.compress {
if self.compress && count > 2 {
next_filename.push(".zst");
}
filenames.push(PathBuf::from(next_filename));
let count = filenames.len();
for i in (0..count-1).rev() {
if self.compress
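The net effect of the count > 2 guard: the first rotation produces a plain .1 file (the daemons may still write to that handle until the re-open command lands), and the .zst suffix only appears from the second rotation onwards. Assuming compression is enabled, the series ends up as:

    access.log          currently written
    access.log.1        newest rotation, still uncompressed
    access.log.2.zst    compressed on a later rotation
    access.log.3.zst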

View File

@@ -68,8 +68,10 @@ Ext.define('PBS.DataStoreInfo', {
let gcstatus = store.getById('gc-status').data.value;
let dedup = (gcstatus['index-data-bytes'] || 0)/
(gcstatus['disk-bytes'] || Infinity);
let dedup = 1.0;
if (gcstatus['disk-bytes'] > 0) {
dedup = (gcstatus['index-data-bytes'] || 0)/gcstatus['disk-bytes'];
}
let countstext = function(count) {
count = count || {};
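As a worked example of the corrected formula: 100 GiB of indexed backup data occupying 40 GiB on disk yields 100/40 = 2.5, while a fresh datastore with zero disk-bytes now reports the neutral 1.0 instead of 0.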

View File

@@ -37,6 +37,7 @@ JSSRC= \
dashboard/RunningTasks.js \
dashboard/TaskSummary.js \
panel/Tasks.js \
panel/XtermJsConsole.js \
Utils.js \
AccessControlPanel.js \
ZFSList.js \

View File

@@ -55,6 +55,12 @@ Ext.define('PBS.store.NavigationStore', {
expanded: true,
leaf: false,
children: [
{
text: gettext('Shell'),
iconCls: 'fa fa-terminal',
path: 'pbsXtermJsConsole',
leaf: true,
},
{
text: gettext('Disks'),
iconCls: 'fa fa-hdd-o',

View File

@@ -101,7 +101,7 @@ Ext.define('PBS.Utils', {
dircreate: [gettext('Directory Storage'), gettext('Create')],
dirremove: [gettext('Directory'), gettext('Remove')],
garbage_collection: ['Datastore', gettext('Garbage collect')],
logrotate: [gettext('Log'), gettext('Rotation')],
logrotate: [null, gettext('Log Rotation')],
prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
reader: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Read objects')),
sync: ['Datastore', gettext('Remote Sync')],

View File

@@ -0,0 +1,25 @@
Ext.define('PBS.panel.XtermJsConsole', {
extend: 'Ext.panel.Panel',
alias: 'widget.pbsXtermJsConsole',
layout: 'fit',
items: [
{
xtype: 'uxiframe',
itemId: 'iframe',
},
],
listeners: {
'afterrender': function() {
let me = this;
let params = {
console: 'shell',
node: 'localhost',
xtermjs: 1,
};
me.getComponent('iframe').load('/?' + Ext.Object.toQueryString(params));
},
},
});

View File

@@ -12,6 +12,7 @@ Ext.define('PBS.window.SyncJobEdit', {
subject: gettext('SyncJob'),
fieldDefaults: { labelWidth: 120 },
defaultFocus: 'proxmoxtextfield[name=comment]',
cbindData: function(initialConfig) {
let me = this;
@@ -23,6 +24,7 @@
me.url = id ? `${baseurl}/${id}` : baseurl;
me.method = id ? 'PUT' : 'POST';
me.autoLoad = !!id;
me.scheduleValue = id ? null : 'hourly';
return { };
},
@@ -47,6 +49,32 @@
value: '{datastore}',
},
},
{
fieldLabel: gettext('Local Owner'),
xtype: 'pbsUserSelector',
name: 'owner',
allowBlank: true,
value: null,
emptyText: 'backup@pam',
skipEmptyText: true,
cbind: {
deleteEmpty: '{!isCreate}',
},
},
{
fieldLabel: gettext('Remove vanished'),
xtype: 'proxmoxcheckbox',
name: 'remove-vanished',
autoEl: {
tag: 'div',
'data-qtip': gettext('Remove snapshots from local datastore if they vanished from source datastore?'),
},
uncheckedValue: false,
value: false,
},
],
column2: [
{
fieldLabel: gettext('Source Remote'),
xtype: 'pbsRemoteSelector',
@@ -59,44 +87,14 @@
allowBlank: false,
name: 'remote-store',
},
],
column2: [
{
fieldLabel: gettext('Owner'),
xtype: 'pbsUserSelector',
name: 'owner',
allowBlank: true,
emptyText: 'backup@pam',
getSubmitData: function() {
let me = this;
let name = me.getName();
let val = me.getSubmitValue();
let data = {};
if (val === null || val === "") {
data.delete = name;
} else {
data[name] = val;
}
return data;
},
},
{
fieldLabel: gettext('Remove vanished'),
xtype: 'proxmoxcheckbox',
name: 'remove-vanished',
uncheckedValue: false,
value: false,
},
{
fieldLabel: gettext('Schedule'),
fieldLabel: gettext('Sync Schedule'),
xtype: 'pbsCalendarEvent',
name: 'schedule',
value: 'hourly',
emptyText: gettext('none (disabled)'),
cbind: {
deleteEmpty: '{!isCreate}',
value: '{scheduleValue}',
},
},
],