file-restore-daemon: limit concurrent download calls
While the issue with vsock packets starving kernel memory is mostly worked around by the '64k -> 4k buffer' patch in 'proxmox-backup-restore-image', let's be safe and also limit the number of concurrent transfers. 8 downloads per VM seems like a fair value. Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
This commit is contained in:
parent
1fde4167ea
commit
3a804a8a20
@ -6,6 +6,7 @@ use hyper::{header, Body, Response, StatusCode};
|
|||||||
use log::error;
|
use log::error;
|
||||||
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
|
use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
use tokio::sync::Semaphore;
|
||||||
|
|
||||||
use std::ffi::OsStr;
|
use std::ffi::OsStr;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
@ -41,6 +42,8 @@ pub const ROUTER: Router = Router::new()
|
|||||||
.get(&list_subdirs_api_method!(SUBDIRS))
|
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||||
.subdirs(SUBDIRS);
|
.subdirs(SUBDIRS);
|
||||||
|
|
||||||
|
static DOWNLOAD_SEM: Semaphore = Semaphore::const_new(8);
|
||||||
|
|
||||||
fn read_uptime() -> Result<f32, Error> {
|
fn read_uptime() -> Result<f32, Error> {
|
||||||
let uptime = fs::read_to_string("/proc/uptime")?;
|
let uptime = fs::read_to_string("/proc/uptime")?;
|
||||||
// unwrap the Option, if /proc/uptime is empty we have bigger problems
|
// unwrap the Option, if /proc/uptime is empty we have bigger problems
|
||||||
@ -252,6 +255,12 @@ fn extract(
|
|||||||
let _inhibitor = watchdog_inhibit();
|
let _inhibitor = watchdog_inhibit();
|
||||||
async move {
|
async move {
|
||||||
let _inhibitor = _inhibitor;
|
let _inhibitor = _inhibitor;
|
||||||
|
|
||||||
|
let _permit = match DOWNLOAD_SEM.try_acquire() {
|
||||||
|
Ok(permit) => permit,
|
||||||
|
Err(_) => bail!("maximum concurrent download limit reached, please wait for another restore to finish before attempting a new one"),
|
||||||
|
};
|
||||||
|
|
||||||
let path = tools::required_string_param(¶m, "path")?;
|
let path = tools::required_string_param(¶m, "path")?;
|
||||||
let mut path = base64::decode(path)?;
|
let mut path = base64::decode(path)?;
|
||||||
if let Some(b'/') = path.last() {
|
if let Some(b'/') = path.last() {
|
||||||
@ -286,6 +295,7 @@ fn extract(
|
|||||||
if pxar {
|
if pxar {
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let _inhibitor = _inhibitor;
|
let _inhibitor = _inhibitor;
|
||||||
|
let _permit = _permit;
|
||||||
let result = async move {
|
let result = async move {
|
||||||
// pxar always expects a directory as its root, so to accommodate files as
|
// pxar always expects a directory as its root, so to accommodate files as
|
||||||
// well we encode the parent dir with a filter only matching the target instead
|
// well we encode the parent dir with a filter only matching the target instead
|
||||||
@ -344,6 +354,7 @@ fn extract(
|
|||||||
} else {
|
} else {
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let _inhibitor = _inhibitor;
|
let _inhibitor = _inhibitor;
|
||||||
|
let _permit = _permit;
|
||||||
let result = async move {
|
let result = async move {
|
||||||
if vm_path.is_dir() {
|
if vm_path.is_dir() {
|
||||||
zip_directory(&mut writer, &vm_path).await?;
|
zip_directory(&mut writer, &vm_path).await?;
|
||||||
|
Loading…
Reference in New Issue
Block a user