file-restore-daemon: limit concurrent download calls

While the issue with vsock packets starving kernel memory is mostly
worked around by the '64k -> 4k buffer' patch in
'proxmox-backup-restore-image', let's be safe and also limit the number
of concurrent transfers. 8 downloads per VM seems like a fair value.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Author:    Stefan Reiter
Date:      2021-05-06 17:26:22 +02:00
Committer: Thomas Lamprecht
Parent:    1fde4167ea
Commit:    3a804a8a20

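The change gates each download request behind a fixed-size tokio::sync::Semaphore, rejecting callers outright once all permits are taken instead of queueing them. A minimal, self-contained sketch of that pattern (assuming a tokio version where Semaphore::const_new is available and anyhow for errors; the handler name is illustrative, not the daemon's actual API):

    use anyhow::{bail, Result};
    use tokio::sync::Semaphore;

    // At most 8 transfers may hold a permit at once; const_new allows
    // initializing the semaphore in a `static`.
    static DOWNLOAD_SEM: Semaphore = Semaphore::const_new(8);

    async fn handle_download() -> Result<()> {
        // try_acquire() fails immediately instead of waiting, so an
        // over-limit request gets an error rather than a stalled connection.
        let _permit = match DOWNLOAD_SEM.try_acquire() {
            Ok(permit) => permit,
            Err(_) => bail!("download limit reached, try again later"),
        };
        // ... perform the transfer; the permit is released automatically
        // when `_permit` is dropped at the end of this scope ...
        Ok(())
    }
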
@@ -6,6 +6,7 @@ use hyper::{header, Body, Response, StatusCode};
 use log::error;
 use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
 use serde_json::Value;
+use tokio::sync::Semaphore;
 
 use std::ffi::OsStr;
 use std::fs;
@@ -41,6 +42,8 @@ pub const ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
     .subdirs(SUBDIRS);
 
+static DOWNLOAD_SEM: Semaphore = Semaphore::const_new(8);
+
 fn read_uptime() -> Result<f32, Error> {
     let uptime = fs::read_to_string("/proc/uptime")?;
     // unwrap the Option, if /proc/uptime is empty we have bigger problems
@@ -252,6 +255,12 @@ fn extract(
     let _inhibitor = watchdog_inhibit();
     async move {
         let _inhibitor = _inhibitor;
+        let _permit = match DOWNLOAD_SEM.try_acquire() {
+            Ok(permit) => permit,
+            Err(_) => bail!("maximum concurrent download limit reached, please wait for another restore to finish before attempting a new one"),
+        };
+
         let path = tools::required_string_param(&param, "path")?;
         let mut path = base64::decode(path)?;
         if let Some(b'/') = path.last() {
@@ -286,6 +295,7 @@ fn extract(
         if pxar {
             tokio::spawn(async move {
                 let _inhibitor = _inhibitor;
+                let _permit = _permit;
                 let result = async move {
                     // pxar always expects a directory as it's root, so to accommodate files as
                     // well we encode the parent dir with a filter only matching the target instead
@@ -344,6 +354,7 @@ fn extract(
         } else {
             tokio::spawn(async move {
                 let _inhibitor = _inhibitor;
+                let _permit = _permit;
                 let result = async move {
                     if vm_path.is_dir() {
                         zip_directory(&mut writer, &vm_path).await?;
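
Note the `let _permit = _permit;` rebinding in both spawn paths: it moves the permit (alongside the watchdog inhibitor) into the spawned task, so the semaphore slot stays occupied for the whole transfer and is freed only when the task's future completes and drops the permit. A hypothetical reduction of that shape, with illustrative names and body:

    use tokio::sync::{Semaphore, TryAcquireError};

    static SEM: Semaphore = Semaphore::const_new(8);

    fn start_transfer() -> Result<(), TryAcquireError> {
        // Acquire up front so an over-limit request is rejected early.
        let permit = SEM.try_acquire()?;
        tokio::spawn(async move {
            // Moving the permit into the task keeps the slot held until
            // the transfer finishes; dropping it releases the slot. This
            // works because SEM is a `static`, so the permit is 'static
            // and may cross the spawn boundary.
            let _permit = permit;
            // ... stream the archive to the client ...
        });
        Ok(())
    }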