use new proxmox-sys crate
Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
parent 860eaec58f
commit d5790a9f27
@@ -106,6 +106,7 @@ proxmox-tfa = { version = "1.3", features = [ "api", "api-types" ] }
 proxmox-time = "1"
 proxmox-uuid = "1"
 proxmox-shared-memory = "0.1.1"
+proxmox-sys = "0.1"

 proxmox-acme-rs = "0.3"
 proxmox-apt = "0.8.0"
@@ -24,6 +24,7 @@ proxmox-schema = "1"
 proxmox-section-config = "1"
 proxmox-time = "1"
 proxmox-shared-memory = "0.1.1"
+proxmox-sys = "0.1"

 pbs-api-types = { path = "../pbs-api-types" }
 pbs-buildcfg = { path = "../pbs-buildcfg" }
@@ -58,7 +58,7 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
     let data = read_file()?;
     match data.get(tokenid) {
         Some(hashed_secret) => {
-            pbs_tools::crypt::verify_crypt_pw(secret, &hashed_secret)
+            proxmox_sys::crypt::verify_crypt_pw(secret, &hashed_secret)
         },
         None => bail!("invalid API token"),
     }
@@ -73,7 +73,7 @@ pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
     let _guard = lock_config()?;

     let mut data = read_file()?;
-    let hashed_secret = pbs_tools::crypt::encrypt_pw(secret)?;
+    let hashed_secret = proxmox_sys::crypt::encrypt_pw(secret)?;
     data.insert(tokenid.clone(), hashed_secret);
     write_file(data)?;
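The crypt helpers keep their signatures and only change their module path from pbs_tools::crypt to proxmox_sys::crypt, so callers need nothing beyond the path update. A minimal sketch of a caller against the relocated API, assuming the functions moved unchanged (the function name `check_login` is purely illustrative):

```rust
use anyhow::Error;

// Hash a plain-text password and verify a later login attempt against it.
fn check_login(password: &str, attempt: &str) -> Result<(), Error> {
    // encrypt_pw() picks a random salt and returns a SHA-256 crypt string ("$5$...").
    let stored_hash = proxmox_sys::crypt::encrypt_pw(password)?;

    // verify_crypt_pw() re-hashes the attempt using the stored string as salt and compares.
    proxmox_sys::crypt::verify_crypt_pw(attempt, &stored_hash)?;

    Ok(())
}
```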
@@ -32,6 +32,7 @@ proxmox-lang = "1"
 proxmox-schema = { version = "1", features = [ "api-macro" ] }
 proxmox-time = "1"
 proxmox-uuid = "1"
+proxmox-sys = "0.1"

 pbs-api-types = { path = "../pbs-api-types" }
 pbs-tools = { path = "../pbs-tools" }
@@ -6,10 +6,11 @@ use std::sync::{Arc, Mutex};
 use anyhow::{bail, format_err, Error};

 use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
+use proxmox_sys::process_locker::{ProcessLocker, ProcessLockSharedGuard, ProcessLockExclusiveGuard};
+use proxmox_sys::worker_task_context::WorkerTaskContext;
+use proxmox_sys::task_log;

 use pbs_api_types::GarbageCollectionStatus;
-use pbs_tools::process_locker::{self, ProcessLocker};
-use pbs_tools::{task_log, task::WorkerTaskContext};

 use crate::DataBlob;
@@ -459,11 +460,11 @@ impl ChunkStore {
         self.base.clone()
     }

-    pub fn try_shared_lock(&self) -> Result<process_locker::ProcessLockSharedGuard, Error> {
+    pub fn try_shared_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
         ProcessLocker::try_shared_lock(self.locker.clone())
     }

-    pub fn try_exclusive_lock(&self) -> Result<process_locker::ProcessLockExclusiveGuard, Error> {
+    pub fn try_exclusive_lock(&self) -> Result<ProcessLockExclusiveGuard, Error> {
         ProcessLocker::try_exclusive_lock(self.locker.clone())
     }
 }
@@ -10,12 +10,13 @@ use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;

 use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};
+use proxmox_sys::process_locker::ProcessLockSharedGuard;
+use proxmox_sys::worker_task_context::WorkerTaskContext;
+use proxmox_sys::{task_log, task_warn};

 use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
 use pbs_tools::format::HumanByte;
 use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
-use pbs_tools::process_locker::ProcessLockSharedGuard;
-use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
 use pbs_config::{open_backup_lockfile, BackupLockGuard};

 use crate::DataBlob;
@@ -12,10 +12,10 @@ use anyhow::{bail, format_err, Error};
 use proxmox::tools::mmap::Mmap;
 use proxmox_io::ReadExt;
 use proxmox_uuid::Uuid;
+use proxmox_sys::process_locker::ProcessLockSharedGuard;
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use pbs_tools::lru_cache::LruCache;
-use pbs_tools::process_locker::ProcessLockSharedGuard;

 use crate::Chunker;
 use crate::chunk_stat::ChunkStat;
@@ -7,8 +7,7 @@ use std::io::{Seek, SeekFrom};

 use anyhow::{bail, format_err, Error};

-use pbs_tools::process_locker::ProcessLockSharedGuard;
+use proxmox_sys::process_locker::ProcessLockSharedGuard;

 use proxmox_io::ReadExt;
 use proxmox_uuid::Uuid;
@@ -1,68 +0,0 @@
-use std::ffi::CStr;
-
-use anyhow::{bail, Error};
-
-// from libcrypt1, 'lib/crypt.h.in'
-const CRYPT_OUTPUT_SIZE: usize = 384;
-const CRYPT_MAX_PASSPHRASE_SIZE: usize = 512;
-const CRYPT_DATA_RESERVED_SIZE: usize = 767;
-const CRYPT_DATA_INTERNAL_SIZE: usize = 30720;
-
-#[repr(C)]
-struct crypt_data {
-    output: [libc::c_char; CRYPT_OUTPUT_SIZE],
-    setting: [libc::c_char; CRYPT_OUTPUT_SIZE],
-    input: [libc::c_char; CRYPT_MAX_PASSPHRASE_SIZE],
-    reserved: [libc::c_char; CRYPT_DATA_RESERVED_SIZE],
-    initialized: libc::c_char,
-    internal: [libc::c_char; CRYPT_DATA_INTERNAL_SIZE],
-}
-
-pub fn crypt(password: &[u8], salt: &[u8]) -> Result<String, Error> {
-    #[link(name = "crypt")]
-    extern "C" {
-        #[link_name = "crypt_r"]
-        fn __crypt_r(
-            key: *const libc::c_char,
-            salt: *const libc::c_char,
-            data: *mut crypt_data,
-        ) -> *mut libc::c_char;
-    }
-
-    let mut data: crypt_data = unsafe { std::mem::zeroed() };
-    for (i, c) in salt.iter().take(data.setting.len() - 1).enumerate() {
-        data.setting[i] = *c as libc::c_char;
-    }
-    for (i, c) in password.iter().take(data.input.len() - 1).enumerate() {
-        data.input[i] = *c as libc::c_char;
-    }
-
-    let res = unsafe {
-        let status = __crypt_r(
-            &data.input as *const _,
-            &data.setting as *const _,
-            &mut data as *mut _,
-        );
-        if status.is_null() {
-            bail!("internal error: crypt_r returned null pointer");
-        }
-        CStr::from_ptr(&data.output as *const _)
-    };
-    Ok(String::from(res.to_str()?))
-}
-
-pub fn encrypt_pw(password: &str) -> Result<String, Error> {
-
-    let salt = proxmox::sys::linux::random_data(8)?;
-    let salt = format!("$5${}$", base64::encode_config(&salt, base64::CRYPT));
-
-    crypt(password.as_bytes(), salt.as_bytes())
-}
-
-pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
-    let verify = crypt(password.as_bytes(), enc_password.as_bytes())?;
-    if verify != enc_password {
-        bail!("invalid credentials");
-    }
-    Ok(())
-}
@@ -4,23 +4,19 @@ pub mod broadcast_future;
 pub mod cert;
 pub mod cli;
 pub mod compression;
-pub mod crypt;
 pub mod crypt_config;
 pub mod format;
 pub mod fs;
 pub mod io;
 pub mod json;
-pub mod logrotate;
 pub mod lru_cache;
 pub mod nom;
 pub mod percent_encoding;
-pub mod process_locker;
 pub mod sha;
 pub mod str;
 pub mod stream;
 pub mod sync;
 pub mod sys;
-pub mod task;
 pub mod ticket;
 pub mod tokio;
 pub mod xattr;
@@ -1,239 +0,0 @@
-use std::path::{Path, PathBuf};
-use std::fs::{File, rename};
-use std::os::unix::io::{FromRawFd, IntoRawFd};
-use std::io::Read;
-
-use anyhow::{bail, format_err, Error};
-use nix::unistd;
-
-use proxmox::tools::fs::{CreateOptions, make_tmp_file};
-
-/// Used for rotating log files and iterating over them
-pub struct LogRotate {
-    base_path: PathBuf,
-    compress: bool,
-
-    /// User logs should be reowned to.
-    owner: Option<String>,
-}
-
-impl LogRotate {
-    /// Creates a new instance if the path given is a valid file name (iow. does not end with ..)
-    /// 'compress' decides if compresses files will be created on rotation, and if it will search
-    /// '.zst' files when iterating
-    ///
-    /// By default, newly created files will be owned by the backup user. See [`new_with_user`] for
-    /// a way to opt out of this behavior.
-    pub fn new<P: AsRef<Path>>(
-        path: P,
-        compress: bool,
-    ) -> Option<Self> {
-        Self::new_with_user(path, compress, Some(pbs_buildcfg::BACKUP_USER_NAME.to_owned()))
-    }
-
-    /// See [`new`]. Additionally this also takes a user which should by default be used to reown
-    /// new files to.
-    pub fn new_with_user<P: AsRef<Path>>(
-        path: P,
-        compress: bool,
-        owner: Option<String>,
-    ) -> Option<Self> {
-        if path.as_ref().file_name().is_some() {
-            Some(Self {
-                base_path: path.as_ref().to_path_buf(),
-                compress,
-                owner,
-            })
-        } else {
-            None
-        }
-    }
-
-    /// Returns an iterator over the logrotated file names that exist
-    pub fn file_names(&self) -> LogRotateFileNames {
-        LogRotateFileNames {
-            base_path: self.base_path.clone(),
-            count: 0,
-            compress: self.compress
-        }
-    }
-
-    /// Returns an iterator over the logrotated file handles
-    pub fn files(&self) -> LogRotateFiles {
-        LogRotateFiles {
-            file_names: self.file_names(),
-        }
-    }
-
-    fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
-        let mut source = File::open(source_path)?;
-        let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
-        let target = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
-        let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
-            Ok(encoder) => encoder,
-            Err(err) => {
-                let _ = unistd::unlink(&tmp_path);
-                bail!("creating zstd encoder failed - {}", err);
-            }
-        };
-
-        if let Err(err) = std::io::copy(&mut source, &mut encoder) {
-            let _ = unistd::unlink(&tmp_path);
-            bail!("zstd encoding failed for file {:?} - {}", target_path, err);
-        }
-
-        if let Err(err) = encoder.finish() {
-            let _ = unistd::unlink(&tmp_path);
-            bail!("zstd finish failed for file {:?} - {}", target_path, err);
-        }
-
-        if let Err(err) = rename(&tmp_path, target_path) {
-            let _ = unistd::unlink(&tmp_path);
-            bail!("rename failed for file {:?} - {}", target_path, err);
-        }
-
-        if let Err(err) = unistd::unlink(source_path) {
-            bail!("unlink failed for file {:?} - {}", source_path, err);
-        }
-
-        Ok(())
-    }
-
-    /// Rotates the files up to 'max_files'
-    /// if the 'compress' option was given it will compress the newest file
-    ///
-    /// e.g. rotates
-    /// foo.2.zst => foo.3.zst
-    /// foo.1     => foo.2.zst
-    /// foo       => foo.1
-    pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
-        let mut filenames: Vec<PathBuf> = self.file_names().collect();
-        if filenames.is_empty() {
-            return Ok(()); // no file means nothing to rotate
-        }
-        let count = filenames.len() + 1;
-
-        let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
-        next_filename.push(format!(".{}", filenames.len()));
-        if self.compress && count > 2 {
-            next_filename.push(".zst");
-        }
-
-        filenames.push(PathBuf::from(next_filename));
-
-        for i in (0..count-1).rev() {
-            if self.compress
-                && filenames[i].extension() != Some(std::ffi::OsStr::new("zst"))
-                && filenames[i+1].extension() == Some(std::ffi::OsStr::new("zst"))
-            {
-                Self::compress(&filenames[i], &filenames[i+1], &options)?;
-            } else {
-                rename(&filenames[i], &filenames[i+1])?;
-            }
-        }
-
-        if let Some(max_files) = max_files {
-            for file in filenames.iter().skip(max_files) {
-                if let Err(err) = unistd::unlink(file) {
-                    eprintln!("could not remove {:?}: {}", &file, err);
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn rotate(
-        &mut self,
-        max_size: u64,
-        options: Option<CreateOptions>,
-        max_files: Option<usize>
-    ) -> Result<bool, Error> {
-
-        let options = match options {
-            Some(options) => options,
-            None => match self.owner.as_deref() {
-                Some(owner) => {
-                    let user = crate::sys::query_user(owner)?
-                        .ok_or_else(|| {
-                            format_err!("failed to lookup owning user '{}' for logs", owner)
-                        })?;
-                    CreateOptions::new().owner(user.uid).group(user.gid)
-                }
-                None => CreateOptions::new(),
-            }
-        };
-
-        let metadata = match self.base_path.metadata() {
-            Ok(metadata) => metadata,
-            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
-            Err(err) => bail!("unable to open task archive - {}", err),
-        };
-
-        if metadata.len() > max_size {
-            self.do_rotate(options, max_files)?;
-            Ok(true)
-        } else {
-            Ok(false)
-        }
-    }
-}
-
-/// Iterator over logrotated file names
-pub struct LogRotateFileNames {
-    base_path: PathBuf,
-    count: usize,
-    compress: bool,
-}
-
-impl Iterator for LogRotateFileNames {
-    type Item = PathBuf;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.count > 0 {
-            let mut path: std::ffi::OsString = self.base_path.clone().into();
-
-            path.push(format!(".{}", self.count));
-            self.count += 1;
-
-            if Path::new(&path).is_file() {
-                Some(path.into())
-            } else if self.compress {
-                path.push(".zst");
-                if Path::new(&path).is_file() {
-                    Some(path.into())
-                } else {
-                    None
-                }
-            } else {
-                None
-            }
-        } else if self.base_path.is_file() {
-            self.count += 1;
-            Some(self.base_path.to_path_buf())
-        } else {
-            None
-        }
-    }
-}
-
-/// Iterator over logrotated files by returning a boxed reader
-pub struct LogRotateFiles {
-    file_names: LogRotateFileNames,
-}
-
-impl Iterator for LogRotateFiles {
-    type Item = Box<dyn Read + Send>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        let filename = self.file_names.next()?;
-        let file = File::open(&filename).ok()?;
-
-        if filename.extension() == Some(std::ffi::OsStr::new("zst")) {
-            let encoder = zstd::stream::read::Decoder::new(file).ok()?;
-            return Some(Box::new(encoder));
-        }
-
-        Some(Box::new(file))
-    }
-}
@@ -1,211 +0,0 @@
-//! Inter-process reader-writer lock builder.
-//!
-//! This implementation uses fcntl record locks with non-blocking
-//! F_SETLK command (never blocks).
-//!
-//! We maintain a map of shared locks with time stamps, so you can get
-//! the timestamp for the oldest open lock with
-//! `oldest_shared_lock()`.
-
-use std::collections::HashMap;
-use std::os::unix::io::AsRawFd;
-use std::sync::{Arc, Mutex};
-
-use anyhow::{bail, Error};
-
-// fixme: use F_OFD_ locks when implemented with nix::fcntl
-
-// Note: flock lock conversion is not atomic, so we need to use fcntl
-
-/// Inter-process reader-writer lock
-pub struct ProcessLocker {
-    file: std::fs::File,
-    exclusive: bool,
-    writers: usize,
-    next_guard_id: u64,
-    shared_guard_list: HashMap<u64, i64>, // guard_id => timestamp
-}
-
-/// Lock guard for shared locks
-///
-/// Release the lock when it goes out of scope.
-pub struct ProcessLockSharedGuard {
-    guard_id: u64,
-    locker: Arc<Mutex<ProcessLocker>>,
-}
-
-impl Drop for ProcessLockSharedGuard {
-    fn drop(&mut self) {
-        let mut data = self.locker.lock().unwrap();
-
-        if data.writers == 0 {
-            panic!("unexpected ProcessLocker state");
-        }
-
-        data.shared_guard_list.remove(&self.guard_id);
-
-        if data.writers == 1 && !data.exclusive {
-            let op = libc::flock {
-                l_type: libc::F_UNLCK as i16,
-                l_whence: libc::SEEK_SET as i16,
-                l_start: 0,
-                l_len: 0,
-                l_pid: 0,
-            };
-
-            if let Err(err) =
-                nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
-            {
-                panic!("unable to drop writer lock - {}", err);
-            }
-        }
-        if data.writers > 0 {
-            data.writers -= 1;
-        }
-    }
-}
-
-/// Lock guard for exclusive locks
-///
-/// Release the lock when it goes out of scope.
-pub struct ProcessLockExclusiveGuard {
-    locker: Arc<Mutex<ProcessLocker>>,
-}
-
-impl Drop for ProcessLockExclusiveGuard {
-    fn drop(&mut self) {
-        let mut data = self.locker.lock().unwrap();
-
-        if !data.exclusive {
-            panic!("unexpected ProcessLocker state");
-        }
-
-        let ltype = if data.writers != 0 {
-            libc::F_RDLCK
-        } else {
-            libc::F_UNLCK
-        };
-        let op = libc::flock {
-            l_type: ltype as i16,
-            l_whence: libc::SEEK_SET as i16,
-            l_start: 0,
-            l_len: 0,
-            l_pid: 0,
-        };
-
-        if let Err(err) =
-            nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
-        {
-            panic!("unable to drop exclusive lock - {}", err);
-        }
-
-        data.exclusive = false;
-    }
-}
-
-impl ProcessLocker {
-    /// Create a new instance for the specified file.
-    ///
-    /// This simply creates the file if it does not exist.
-    pub fn new<P: AsRef<std::path::Path>>(lockfile: P) -> Result<Arc<Mutex<Self>>, Error> {
-        let file = std::fs::OpenOptions::new()
-            .create(true)
-            .read(true)
-            .write(true)
-            .open(lockfile)?;
-
-        Ok(Arc::new(Mutex::new(Self {
-            file,
-            exclusive: false,
-            writers: 0,
-            next_guard_id: 0,
-            shared_guard_list: HashMap::new(),
-        })))
-    }
-
-    fn try_lock(file: &std::fs::File, ltype: i32) -> Result<(), Error> {
-        let op = libc::flock {
-            l_type: ltype as i16,
-            l_whence: libc::SEEK_SET as i16,
-            l_start: 0,
-            l_len: 0,
-            l_pid: 0,
-        };
-
-        nix::fcntl::fcntl(file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLK(&op))?;
-
-        Ok(())
-    }
-
-    /// Try to acquire a shared lock
-    ///
-    /// On success, this makes sure that no other process can get an exclusive lock for the file.
-    pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {
-        let mut data = locker.lock().unwrap();
-
-        if data.writers == 0 && !data.exclusive {
-            if let Err(err) = Self::try_lock(&data.file, libc::F_RDLCK) {
-                bail!("unable to get shared lock - {}", err);
-            }
-        }
-
-        data.writers += 1;
-
-        let guard = ProcessLockSharedGuard {
-            locker: locker.clone(),
-            guard_id: data.next_guard_id,
-        };
-        data.next_guard_id += 1;
-
-        let now = unsafe { libc::time(std::ptr::null_mut()) };
-
-        data.shared_guard_list.insert(guard.guard_id, now);
-
-        Ok(guard)
-    }
-
-    /// Get oldest shared lock timestamp
-    pub fn oldest_shared_lock(locker: Arc<Mutex<Self>>) -> Option<i64> {
-        let mut result = None;
-
-        let data = locker.lock().unwrap();
-
-        for v in data.shared_guard_list.values() {
-            result = match result {
-                None => Some(*v),
-                Some(x) => {
-                    if x < *v {
-                        Some(x)
-                    } else {
-                        Some(*v)
-                    }
-                }
-            };
-        }
-
-        result
-    }
-
-    /// Try to acquire a exclusive lock
-    ///
-    /// Make sure the we are the only process which has locks for this file (shared or exclusive).
-    pub fn try_exclusive_lock(
-        locker: Arc<Mutex<Self>>,
-    ) -> Result<ProcessLockExclusiveGuard, Error> {
-        let mut data = locker.lock().unwrap();
-
-        if data.exclusive {
-            bail!("already locked exclusively");
-        }
-
-        if let Err(err) = Self::try_lock(&data.file, libc::F_WRLCK) {
-            bail!("unable to get exclusive lock - {}", err);
-        }
-
-        data.exclusive = true;
-
-        Ok(ProcessLockExclusiveGuard {
-            locker: locker.clone(),
-        })
-    }
-}
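The module above is dropped from pbs-tools; the chunk-store changes earlier in this commit now import it from proxmox_sys::process_locker. A minimal usage sketch against the relocated API, assuming the type moved unchanged (the lock-file path is illustrative):

```rust
use anyhow::Error;
use proxmox_sys::process_locker::ProcessLocker;

fn lock_example() -> Result<(), Error> {
    // Creates the lock file if it does not exist yet; returns Arc<Mutex<ProcessLocker>>.
    let locker = ProcessLocker::new("/run/example/.lock")?;

    // Shared lock: other processes may also hold shared locks, but no exclusive one.
    let shared = ProcessLocker::try_shared_lock(locker.clone())?;

    // Timestamp of the oldest shared lock held via this locker, if any.
    let _oldest = ProcessLocker::oldest_shared_lock(locker.clone());

    // Guards release their fcntl lock on drop; an exclusive lock uses F_SETLK
    // (non-blocking) and fails while another process still holds a shared lock.
    drop(shared);
    let _exclusive = ProcessLocker::try_exclusive_lock(locker)?;

    Ok(())
}
```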
@@ -1,92 +0,0 @@
-use anyhow::{bail, Error};
-
-/// Worker task abstraction
-///
-/// A worker task is a long running task, which usually logs output into a separate file.
-pub trait WorkerTaskContext: Send + Sync {
-
-    /// Test if there was a request to abort the task.
-    fn abort_requested(&self) -> bool;
-
-    /// If the task should be aborted, this should fail with a reasonable error message.
-    fn check_abort(&self) -> Result<(), Error> {
-        if self.abort_requested() {
-            bail!("abort requested - aborting task");
-        }
-        Ok(())
-    }
-
-    /// Test if there was a request to shutdown the server.
-    fn shutdown_requested(&self) -> bool;
-
-    /// This should fail with a reasonable error message if there was
-    /// a request to shutdown the server.
-    fn fail_on_shutdown(&self) -> Result<(), Error> {
-        if self.shutdown_requested() {
-            bail!("Server shutdown requested - aborting task");
-        }
-        Ok(())
-    }
-
-    /// Create a log message for this task.
-    fn log(&self, level: log::Level, message: &std::fmt::Arguments);
-}
-
-/// Convenience implementation:
-impl<T: WorkerTaskContext + ?Sized> WorkerTaskContext for std::sync::Arc<T> {
-    fn abort_requested(&self) -> bool {
-        <T as WorkerTaskContext>::abort_requested(&*self)
-    }
-
-    fn check_abort(&self) -> Result<(), Error> {
-        <T as WorkerTaskContext>::check_abort(&*self)
-    }
-
-    fn shutdown_requested(&self) -> bool {
-        <T as WorkerTaskContext>::shutdown_requested(&*self)
-    }
-
-    fn fail_on_shutdown(&self) -> Result<(), Error> {
-        <T as WorkerTaskContext>::fail_on_shutdown(&*self)
-    }
-
-    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
-        <T as WorkerTaskContext>::log(&*self, level, message)
-    }
-}
-
-#[macro_export]
-macro_rules! task_error {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_warn {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_log {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_debug {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
-    }};
-}
-
-#[macro_export]
-macro_rules! task_trace {
-    ($task:expr, $($fmt:tt)+) => {{
-        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
-    }};
-}
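The trait and the task_* macros now come from proxmox-sys, imported throughout the rest of this commit as proxmox_sys::worker_task_context::WorkerTaskContext and proxmox_sys::{task_log, task_warn}. A minimal sketch of implementing the trait for a custom task type and logging through the macro, assuming trait and macros moved unchanged (StderrTask is a made-up example type; the `log` crate must be a dependency of the calling crate):

```rust
use std::sync::Arc;

use proxmox_sys::task_log;
use proxmox_sys::worker_task_context::WorkerTaskContext;

// Hypothetical context that just writes task output to stderr.
struct StderrTask;

impl WorkerTaskContext for StderrTask {
    fn abort_requested(&self) -> bool {
        false // never ask the task to abort
    }

    fn shutdown_requested(&self) -> bool {
        false // no server shutdown pending
    }

    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
        eprintln!("[{}] {}", level, message);
    }
}

fn run_task() {
    // The blanket impl for Arc<T> lets a shared worker handle be passed to the macros.
    let worker = Arc::new(StderrTask);
    task_log!(worker, "processed {} chunks", 42);
}
```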
@@ -22,6 +22,7 @@ proxmox-router = { version = "1.1", features = [ "cli" ] }
 proxmox-schema = { version = "1", features = [ "api-macro" ] }
 proxmox-time = "1"
 proxmox-uuid = "1"
+proxmox-sys = "0.1"

 pbs-api-types = { path = "../pbs-api-types" }
 pbs-buildcfg = { path = "../pbs-buildcfg" }
@@ -13,6 +13,7 @@ use nix::unistd::Pid;

 use proxmox::tools::fs::{create_path, file_read_string, make_tmp_file, CreateOptions};
 use proxmox::tools::fd::fd_change_cloexec;
+use proxmox_sys::logrotate::LogRotate;

 use pbs_client::{VsockClient, DEFAULT_VSOCK_PORT};
@@ -149,10 +150,9 @@ pub async fn start_vm(

     let logpath = create_restore_log_dir()?;
     let logfile = &format!("{}/qemu.log", logpath);
-    let mut logrotate = pbs_tools::logrotate::LogRotate::new(logfile, false)
-        .ok_or_else(|| format_err!("could not get QEMU log file names"))?;
+    let mut logrotate = LogRotate::new(logfile, false, Some(16), None)?;

-    if let Err(err) = logrotate.do_rotate(CreateOptions::default(), Some(16)) {
+    if let Err(err) = logrotate.do_rotate() {
         eprintln!("warning: logrotate for QEMU log file failed - {}", err);
     }
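LogRotate now lives in proxmox_sys::logrotate with a changed constructor: it returns a Result and takes the file limit and optional CreateOptions up front, so do_rotate() and rotate() no longer take them per call (compare the old pbs-tools signatures removed above). A minimal sketch of the new call pattern, assuming the signatures visible in this diff (the log path and limits are illustrative):

```rust
use anyhow::Error;
use proxmox_sys::logrotate::LogRotate;

fn rotate_example() -> Result<(), Error> {
    // path, compress, max number of rotated files, optional CreateOptions for new files
    let mut logrotate = LogRotate::new("/var/log/example/qemu.log", false, Some(16), None)?;

    // Unconditional rotation, as the file-restore helper above does ...
    logrotate.do_rotate()?;

    // ... or size-triggered rotation; returns true if the file was rotated.
    if logrotate.rotate(32 * 1024 * 1024 - 1)? {
        println!("log was rotated");
    }

    Ok(())
}
```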
@@ -37,6 +37,7 @@ proxmox-http = { version = "0.5.0", features = [ "client" ] }
 proxmox-router = "1.1"
 proxmox-schema = { version = "1", features = [ "api-macro", "upid-api-impl" ] }
 proxmox-time = "1"
+proxmox-sys = "0.1"

 # fixme: remove this dependency (pbs_tools::broadcast_future)
 pbs-tools = { path = "../pbs-tools" }
@@ -21,8 +21,8 @@ use proxmox::tools::fs::{create_path, replace_file, atomic_open_or_create_file,
 use proxmox_lang::try_block;
 use proxmox_schema::upid::UPID;

-use pbs_tools::task::WorkerTaskContext;
-use pbs_tools::logrotate::{LogRotate, LogRotateFiles};
+use proxmox_sys::worker_task_context::{WorkerTaskContext};
+use proxmox_sys::logrotate::{LogRotate, LogRotateFiles};

 use crate::{CommandSocket, FileLogger, FileLogOptions};
@@ -209,16 +209,25 @@ pub fn init_worker_tasks(basedir: PathBuf, file_opts: CreateOptions) -> Result<(

 /// checks if the Task Archive is bigger that 'size_threshold' bytes, and
 /// rotates it if it is
-pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
+pub fn rotate_task_log_archive(
+    size_threshold: u64,
+    compress: bool,
+    max_files: Option<usize>,
+    options: Option<CreateOptions>,
+) -> Result<bool, Error> {

     let setup = worker_task_setup()?;

     let _lock = setup.lock_task_list_files(true)?;

-    let mut logrotate = LogRotate::new(&setup.task_archive_fn, compress)
-        .ok_or_else(|| format_err!("could not get archive file names"))?;
+    let mut logrotate = LogRotate::new(
+        &setup.task_archive_fn,
+        compress,
+        max_files,
+        options,
+    )?;

-    logrotate.rotate(size_threshold, None, max_files)
+    logrotate.rotate(size_threshold)
 }

 /// removes all task logs that are older than the oldest task entry in the
@@ -228,8 +237,12 @@ pub fn cleanup_old_tasks(compressed: bool) -> Result<(), Error> {

     let _lock = setup.lock_task_list_files(true)?;

-    let logrotate = LogRotate::new(&setup.task_archive_fn, compressed)
-        .ok_or_else(|| format_err!("could not get archive file names"))?;
+    let logrotate = LogRotate::new(
+        &setup.task_archive_fn,
+        compressed,
+        None,
+        None,
+    )?;

     let mut timestamp = None;
     if let Some(last_file) = logrotate.files().last() {
@@ -649,8 +662,7 @@ impl TaskListInfoIterator {
         let archive = if active_only {
             None
         } else {
-            let logrotate = LogRotate::new(&setup.task_archive_fn, true)
-                .ok_or_else(|| format_err!("could not get archive file names"))?;
+            let logrotate = LogRotate::new(&setup.task_archive_fn, true, None, None)?;
             Some(logrotate.files())
         };
@@ -21,6 +21,7 @@ use proxmox_router::{
     RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
 };
 use proxmox_schema::*;
+use proxmox_sys::{task_log, task_warn};

 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
@@ -55,7 +56,6 @@ use pbs_datastore::prune::compute_prune_info;
 use pbs_tools::blocking::WrappedReaderStream;
 use pbs_tools::stream::{AsyncReaderStream, AsyncChannelWriter};
 use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_tools::{task_log, task_warn};
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::{WorkerTask, formatter};
@@ -13,12 +13,12 @@ use proxmox_router::{
     http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::{task_log, task_warn};

 use proxmox_acme_rs::account::AccountData as AcmeAccountData;
 use proxmox_acme_rs::Account;

 use pbs_api_types::{Authid, PRIV_SYS_MODIFY};
-use pbs_tools::{task_log, task_warn};

 use crate::acme::AcmeClient;
 use crate::api2::types::{AcmeAccountName, AcmeChallengeSchema, KnownAcmeDirectory};
@@ -7,6 +7,7 @@ use ::serde::{Deserialize, Serialize};
 use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
 use proxmox_schema::{api, ApiType, parse_property_string};
 use proxmox_section_config::SectionConfigData;
+use proxmox_sys::worker_task_context::WorkerTaskContext;

 use pbs_datastore::chunk_store::ChunkStore;
 use pbs_config::BackupLockGuard;
@@ -25,7 +26,6 @@ use crate::api2::admin::{
     verify::list_verification_jobs,
 };
 use pbs_config::CachedUserInfo;
-use pbs_tools::task::WorkerTaskContext;

 use proxmox_rest_server::WorkerTask;
@@ -11,10 +11,11 @@ use proxmox_router::SubdirMap;
 use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_router::list_subdirs_api_method;
 use proxmox_schema::api;
+use proxmox_sys::{task_log, task_warn};

 use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY};
 use pbs_buildcfg::configdir;
-use pbs_tools::{task_log, task_warn, cert};
+use pbs_tools::cert;

 use crate::acme::AcmeClient;
 use crate::api2::types::AcmeDomain;
@@ -5,12 +5,12 @@ use ::serde::{Deserialize, Serialize};
 use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
 use proxmox_schema::api;
 use proxmox_section_config::SectionConfigData;
+use proxmox_sys::task_log;

 use pbs_api_types::{
     DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
     DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
 };
-use pbs_tools::task_log;

 use crate::tools::disks::{
     DiskManage, FileSystemType, DiskUsageType,
@@ -6,6 +6,7 @@ use proxmox_router::{
     list_subdirs_api_method, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
 };
 use proxmox_schema::api;
+use proxmox_sys::task_log;

 use pbs_api_types::{
     UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
@@ -17,7 +18,6 @@ use crate::tools::disks::{
     get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
 };
 use proxmox_rest_server::WorkerTask;
-use pbs_tools::task_log;

 pub mod directory;
 pub mod zfs;
@@ -3,6 +3,7 @@ use serde_json::{json, Value};

 use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
 use proxmox_schema::{api, parse_property_string};
+use proxmox_sys::task_log;

 use pbs_api_types::{
     ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
@@ -10,7 +11,6 @@ use pbs_api_types::{
     DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
     PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
 };
-use pbs_tools::task_log;

 use crate::tools::disks::{
     zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
@@ -6,13 +6,13 @@ use futures::{select, future::FutureExt};

 use proxmox_schema::api;
 use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_sys::task_log;

 use pbs_api_types::{
     Authid, SyncJobConfig, GroupFilter, GROUP_FILTER_LIST_SCHEMA,
     DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
     PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
 };
-use pbs_tools::task_log;
 use proxmox_rest_server::WorkerTask;
 use pbs_config::CachedUserInfo;
@@ -7,6 +7,7 @@ use serde_json::Value;
 use proxmox_lang::try_block;
 use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
+use proxmox_sys::{task_log, task_warn, worker_task_context::WorkerTaskContext};

 use pbs_api_types::{
     Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig,
@@ -16,7 +17,6 @@ use pbs_api_types::{

 use pbs_datastore::{DataStore, StoreProgress, SnapshotReader};
 use pbs_datastore::backup_info::{BackupDir, BackupInfo, BackupGroup};
-use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;
|
@ -13,6 +13,7 @@ use proxmox_router::{
|
||||||
use proxmox_schema::api;
|
use proxmox_schema::api;
|
||||||
use proxmox_section_config::SectionConfigData;
|
use proxmox_section_config::SectionConfigData;
|
||||||
use proxmox_uuid::Uuid;
|
use proxmox_uuid::Uuid;
|
||||||
|
use proxmox_sys::{task_log, task_warn};
|
||||||
|
|
||||||
use pbs_api_types::{
|
use pbs_api_types::{
|
||||||
UPID_SCHEMA, CHANGER_NAME_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_LABEL_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
UPID_SCHEMA, CHANGER_NAME_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_LABEL_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
@ -28,7 +29,6 @@ use pbs_tape::{
|
||||||
sg_tape::tape_alert_flags_critical,
|
sg_tape::tape_alert_flags_critical,
|
||||||
linux_list_drives::{lto_tape_device_list, lookup_device_identification, open_lto_tape_device},
|
linux_list_drives::{lto_tape_device_list, lookup_device_identification, open_lto_tape_device},
|
||||||
};
|
};
|
||||||
use pbs_tools::{task_log, task_warn};
|
|
||||||
use proxmox_rest_server::WorkerTask;
|
use proxmox_rest_server::WorkerTask;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
|
|
|
@@ -14,6 +14,7 @@ use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::{api, parse_property_string};
 use proxmox_section_config::SectionConfigData;
 use proxmox_uuid::Uuid;
+use proxmox_sys::{task_log, task_warn, worker_task_context::WorkerTaskContext};

 use pbs_api_types::{
     Authid, Userid, CryptMode,
@@ -32,7 +33,6 @@ use pbs_tape::{
     TapeRead, BlockReadError, MediaContentHeader,
     PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
 };
-use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
 use proxmox_rest_server::WorkerTask;

 use crate::{
@@ -78,13 +78,13 @@ impl ProxmoxAuthenticator for PBS {
         let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
         match data[username.as_str()].as_str() {
             None => bail!("no password set"),
-            Some(enc_password) => pbs_tools::crypt::verify_crypt_pw(password, enc_password)?,
+            Some(enc_password) => proxmox_sys::crypt::verify_crypt_pw(password, enc_password)?,
         }
         Ok(())
     }

     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
-        let enc_password = pbs_tools::crypt::encrypt_pw(password)?;
+        let enc_password = proxmox_sys::crypt::encrypt_pw(password)?;
         let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
         data[username.as_str()] = enc_password.into();
@@ -6,13 +6,14 @@ use std::time::Instant;

 use anyhow::{bail, format_err, Error};

+use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};

 use pbs_api_types::{Authid, CryptMode, VerifyState, UPID, SnapshotVerifyState};
 use pbs_datastore::{DataStore, DataBlob, StoreProgress};
 use pbs_datastore::backup_info::{BackupGroup, BackupDir, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
 use pbs_tools::fs::lock_dir_noblock_shared;
-use pbs_tools::{task_log, task::WorkerTaskContext};

 use crate::tools::ParallelHandler;
@@ -22,8 +22,9 @@ use proxmox::tools::fs::CreateOptions;
 use proxmox_lang::try_block;
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType, UserInformation};
 use proxmox_http::client::{RateLimitedStream, ShareableRateLimit};
+use proxmox_sys::{task_log, task_warn};
+use proxmox_sys::logrotate::LogRotate;

-use pbs_tools::{task_log, task_warn};
 use pbs_datastore::DataStore;

 use proxmox_rest_server::{
@@ -47,7 +48,6 @@ use proxmox_backup::{

 use pbs_buildcfg::configdir;
 use proxmox_time::{compute_next_event, parse_calendar_event};
-use pbs_tools::logrotate::LogRotate;

 use pbs_api_types::{
     Authid, TapeBackupJobConfig, VerificationJobConfig, SyncJobConfig, DataStoreConfig,
|
@ -813,7 +813,19 @@ async fn schedule_task_log_rotate() {
|
||||||
let result = try_block!({
|
let result = try_block!({
|
||||||
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
|
let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
|
||||||
let max_files = 20; // times twenty files gives > 100000 task entries
|
let max_files = 20; // times twenty files gives > 100000 task entries
|
||||||
let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
|
|
||||||
|
let user = pbs_config::backup_user()?;
|
||||||
|
let options = proxmox::tools::fs::CreateOptions::new()
|
||||||
|
.owner(user.uid)
|
||||||
|
.group(user.gid);
|
||||||
|
|
||||||
|
let has_rotated = rotate_task_log_archive(
|
||||||
|
max_size,
|
||||||
|
true,
|
||||||
|
Some(max_files),
|
||||||
|
Some(options.clone()),
|
||||||
|
)?;
|
||||||
|
|
||||||
if has_rotated {
|
if has_rotated {
|
||||||
task_log!(worker, "task log archive was rotated");
|
task_log!(worker, "task log archive was rotated");
|
||||||
} else {
|
} else {
|
||||||
|
@@ -822,10 +834,16 @@ async fn schedule_task_log_rotate() {

         let max_size = 32 * 1024 * 1024 - 1;
         let max_files = 14;
-        let mut logrotate = LogRotate::new(pbs_buildcfg::API_ACCESS_LOG_FN, true)
-            .ok_or_else(|| format_err!("could not get API access log file names"))?;
-
-        if logrotate.rotate(max_size, None, Some(max_files))? {
+        let mut logrotate = LogRotate::new(
+            pbs_buildcfg::API_ACCESS_LOG_FN,
+            true,
+            Some(max_files),
+            Some(options.clone()),
+        )?;
+
+        if logrotate.rotate(max_size)? {
             println!("rotated access log, telling daemons to re-open log file");
             pbs_runtime::block_on(command_reopen_access_logfiles())?;
             task_log!(worker, "API access log was rotated");
@@ -833,10 +851,14 @@ async fn schedule_task_log_rotate() {
             task_log!(worker, "API access log was not rotated");
         }

-        let mut logrotate = LogRotate::new(pbs_buildcfg::API_AUTH_LOG_FN, true)
-            .ok_or_else(|| format_err!("could not get API auth log file names"))?;
+        let mut logrotate = LogRotate::new(
+            pbs_buildcfg::API_AUTH_LOG_FN,
+            true,
+            Some(max_files),
+            Some(options),
+        )?;

-        if logrotate.rotate(max_size, None, Some(max_files))? {
+        if logrotate.rotate(max_size)? {
             println!("rotated auth log, telling daemons to re-open log file");
             pbs_runtime::block_on(command_reopen_auth_logfiles())?;
             task_log!(worker, "API authentication log was rotated");
@@ -1,8 +1,9 @@
 use std::sync::Arc;
 use anyhow::Error;

+use proxmox_sys::task_log;

 use pbs_api_types::Authid;
-use pbs_tools::task_log;
 use pbs_datastore::DataStore;
 use proxmox_rest_server::WorkerTask;
@@ -2,12 +2,13 @@ use std::sync::Arc;

 use anyhow::Error;

+use proxmox_sys::{task_log, task_warn};

 use pbs_datastore::backup_info::BackupInfo;
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::DataStore;
 use pbs_api_types::{Authid, PRIV_DATASTORE_MODIFY, PruneOptions};
 use pbs_config::CachedUserInfo;
-use pbs_tools::{task_log, task_warn};
 use proxmox_rest_server::WorkerTask;

 use crate::server::jobstate::Job;
@@ -12,6 +12,7 @@ use serde_json::json;
 use http::StatusCode;

 use proxmox_router::HttpError;
+use proxmox_sys::task_log;

 use pbs_api_types::{Authid, GroupFilter, GroupListItem, Remote, SnapshotListItem};

@@ -24,7 +25,6 @@ use pbs_datastore::manifest::{
     CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, FileInfo, archive_type
 };
 use pbs_tools::sha::sha256;
-use pbs_tools::task_log;
 use pbs_client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader};
 use proxmox_rest_server::WorkerTask;
@@ -1,6 +1,6 @@
 use anyhow::{format_err, Error};

-use pbs_tools::task_log;
+use proxmox_sys::task_log;
 use pbs_api_types::{Authid, VerificationJobConfig};
 use proxmox_rest_server::WorkerTask;
 use pbs_datastore::DataStore;
@@ -27,10 +27,10 @@ use proxmox::{
 use proxmox_io::ReadExt;
 use proxmox_section_config::SectionConfigData;
 use proxmox_uuid::Uuid;
+use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};

 use pbs_api_types::{VirtualTapeDrive, LtoTapeDrive, Fingerprint};
 use pbs_config::key_config::KeyConfig;
-use pbs_tools::{task_log, task::WorkerTaskContext};

 use pbs_tape::{
     TapeWrite, TapeRead, BlockReadError, MediaContentHeader,
@@ -12,8 +12,8 @@ use std::sync::{Arc, Mutex};
 use anyhow::{bail, Error};

 use proxmox_uuid::Uuid;
+use proxmox_sys::{task_log, task_warn};

-use pbs_tools::{task_log, task_warn};
 use pbs_config::tape_encryption_keys::load_key_configs;
 use pbs_tape::{
     TapeWrite,
@@ -7,9 +7,9 @@ extern crate nix;

 use proxmox::tools::fs::CreateOptions;
 use proxmox_lang::try_block;
+use proxmox_sys::{task_log, worker_task_context::WorkerTaskContext};

 use pbs_api_types::{Authid, UPID};
-use pbs_tools::{task_log, task::WorkerTaskContext};

 use proxmox_rest_server::{CommandSocket, WorkerTask};