Compare commits

..

6 Commits

Author SHA1 Message Date
beaa683a52 bump version to 0.8.9-1
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 11:24:56 +02:00
33a88dafb9 server/state: add spawn_internal_task and use it for websockets
is a helper to spawn an internal tokio task without it showing up
in the task list

it is still tracked for reload and notifies the last_worker_listeners

this enables the console to survive a reload of proxmox-backup-proxy

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-24 11:17:33 +02:00
224c65f8de termproxy: let users stop the termproxy task
for that we have to do a select on the workers abort_future

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
2020-07-24 11:17:33 +02:00
f2b4b4b9fe fix 2885: bail on duplicate backup target
Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2020-07-24 11:08:56 +02:00
ea9e559fc4 client: log archive upload duration more accurately, fix grammar
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 10:15:28 +02:00
0cf14984cc client: avoid division by zero in avg speed calculation, be more accurate
using micros vs. as_secs_f64 allows to have it calculated as usize
bytes, easier to handle - this was also used when it still lived in
upload_chunk_info_stream

Co-authored-by: Stoiko Ivanov <s.ivanov@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-07-24 10:14:40 +02:00
6 changed files with 72 additions and 26 deletions

View File

@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.8.8"
version = "0.8.9"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"

11
debian/changelog vendored
View File

@ -1,3 +1,14 @@
rust-proxmox-backup (0.8.9-1) unstable; urgency=medium
* improve termproxy (console) behavior on updating proxmox-backup-server and
other daemon restarts
* client: improve upload log output and speed calculation
* fix #2885: client upload: bail on duplicate backup targets
-- Proxmox Support Team <support@proxmox.com> Fri, 24 Jul 2020 11:24:07 +0200
rust-proxmox-backup (0.8.8-1) unstable; urgency=medium
* pxar: .pxarexclude: match behavior from absolute paths to the one described

View File

@ -4,7 +4,7 @@ use std::os::unix::io::AsRawFd;
use anyhow::{bail, format_err, Error};
use futures::{
future::{FutureExt, TryFutureExt},
try_join,
select,
};
use hyper::body::Body;
use hyper::http::request::Parts;
@ -169,9 +169,10 @@ async fn termproxy(
let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");
cmd.args(&arguments);
cmd.stdout(std::process::Stdio::piped());
cmd.stderr(std::process::Stdio::piped());
cmd.args(&arguments)
.kill_on_drop(true)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
let mut child = cmd.spawn().expect("error executing termproxy");
@ -184,7 +185,7 @@ async fn termproxy(
while let Some(line) = reader.next_line().await? {
worker_stdout.log(line);
}
Ok(())
Ok::<(), Error>(())
};
let worker_stderr = worker.clone();
@ -193,19 +194,25 @@ async fn termproxy(
while let Some(line) = reader.next_line().await? {
worker_stderr.warn(line);
}
Ok(())
Ok::<(), Error>(())
};
let (exit_code, _, _) = try_join!(child, stdout_fut, stderr_fut)?;
select!{
res = child.fuse() => {
let exit_code = res?;
if !exit_code.success() {
match exit_code.code() {
Some(code) => bail!("termproxy exited with {}", code),
None => bail!("termproxy exited by signal"),
}
}
Ok(())
},
res = stdout_fut.fuse() => res,
res = stderr_fut.fuse() => res,
res = worker.abort_future().fuse() => res.map_err(Error::from),
}
},
)?;
Ok(json!({
@ -260,7 +267,7 @@ fn upgrade_to_websocket(
let (ws, response) = WebSocket::new(parts.headers)?;
tokio::spawn(async move {
crate::server::spawn_internal_task(async move {
let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
Ok(upgraded) => upgraded,
_ => bail!("error"),

View File

@ -935,12 +935,18 @@ async fn create_backup(
}
let mut upload_list = vec![];
let mut target_set = HashSet::new();
for backupspec in backupspec_list {
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
let filename = &spec.config_string;
let target = &spec.archive_name;
if target_set.contains(target) {
bail!("got target twice: '{}'", target);
}
target_set.insert(target.to_string());
use std::os::unix::fs::FileTypeExt;
let metadata = std::fs::metadata(filename)

View File

@ -264,9 +264,9 @@ impl BackupWriter {
crate::tools::format::strip_server_file_expenstion(archive_name.clone())
};
if archive_name != CATALOG_NAME {
let speed: HumanByte = (uploaded / (duration.as_secs() as usize)).into();
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
let uploaded: HumanByte = uploaded.into();
println!("{}: had to upload {} from {} in {}s, avgerage speed {}/s).", archive, uploaded, vsize_h, duration.as_secs(), speed);
println!("{}: had to upload {} of {} in {:.2}s, avgerage speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
} else {
println!("Uploaded backup catalog ({})", vsize_h);
}

View File

@ -19,6 +19,7 @@ pub struct ServerState {
pub shutdown_listeners: BroadcastData<()>,
pub last_worker_listeners: BroadcastData<()>,
pub worker_count: usize,
pub task_count: usize,
pub reload_request: bool,
}
@ -28,6 +29,7 @@ lazy_static! {
shutdown_listeners: BroadcastData::new(),
last_worker_listeners: BroadcastData::new(),
worker_count: 0,
task_count: 0,
reload_request: false,
});
}
@ -101,20 +103,40 @@ pub fn last_worker_future() -> impl Future<Output = Result<(), Error>> {
}
pub fn set_worker_count(count: usize) {
let mut data = SERVER_STATE.lock().unwrap();
data.worker_count = count;
SERVER_STATE.lock().unwrap().worker_count = count;
if !(data.mode == ServerMode::Shutdown && data.worker_count == 0) { return; }
data.last_worker_listeners.notify_listeners(Ok(()));
check_last_worker();
}
pub fn check_last_worker() {
let mut data = SERVER_STATE.lock().unwrap();
if !(data.mode == ServerMode::Shutdown && data.worker_count == 0) { return; }
if !(data.mode == ServerMode::Shutdown && data.worker_count == 0 && data.task_count == 0) { return; }
data.last_worker_listeners.notify_listeners(Ok(()));
}
/// Spawns a tokio task that will be tracked for reload
/// and if it is finished, notify the last_worker_listener if we
/// are in shutdown mode
pub fn spawn_internal_task<T>(task: T)
where
T: Future + Send + 'static,
T::Output: Send + 'static,
{
let mut data = SERVER_STATE.lock().unwrap();
data.task_count += 1;
tokio::spawn(async move {
let _ = tokio::spawn(task).await; // ignore errors
{ // drop mutex
let mut data = SERVER_STATE.lock().unwrap();
if data.task_count > 0 {
data.task_count -= 1;
}
}
check_last_worker();
});
}