use new proxmox-sys crate

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
This commit is contained in:
Dietmar Maurer
2021-11-19 10:51:41 +01:00
parent 860eaec58f
commit d5790a9f27
38 changed files with 99 additions and 669 deletions

View File

@ -1,68 +0,0 @@
use std::ffi::CStr;
use anyhow::{bail, Error};
// Buffer sizes for libcrypt's reentrant crypt_r(3) interface.
// Values taken from libcrypt1, 'lib/crypt.h.in' - they must match the
// installed library's ABI, since `crypt_data` below is passed across
// the FFI boundary by pointer.
const CRYPT_OUTPUT_SIZE: usize = 384;
const CRYPT_MAX_PASSPHRASE_SIZE: usize = 512;
const CRYPT_DATA_RESERVED_SIZE: usize = 767;
const CRYPT_DATA_INTERNAL_SIZE: usize = 30720;

/// Mirror of libcrypt's `struct crypt_data` ('lib/crypt.h.in').
#[repr(C)]
struct crypt_data {
    /// Filled by `crypt_r` with the NUL-terminated hash result.
    output: [libc::c_char; CRYPT_OUTPUT_SIZE],
    /// Salt / setting string input (see `crypt()` below).
    setting: [libc::c_char; CRYPT_OUTPUT_SIZE],
    /// Passphrase input.
    input: [libc::c_char; CRYPT_MAX_PASSPHRASE_SIZE],
    reserved: [libc::c_char; CRYPT_DATA_RESERVED_SIZE],
    initialized: libc::c_char,
    internal: [libc::c_char; CRYPT_DATA_INTERNAL_SIZE],
}
/// Hash `password` using libcrypt's reentrant `crypt_r(3)` with the
/// given `salt` (a full setting string such as `$5$<salt>$`, or an
/// existing hash, is also accepted - see `verify_crypt_pw`).
///
/// Both inputs are silently truncated to the fixed buffer sizes of
/// `crypt_data`, keeping one byte for the NUL terminator.
pub fn crypt(password: &[u8], salt: &[u8]) -> Result<String, Error> {
    #[link(name = "crypt")]
    extern "C" {
        // bind the reentrant variant so no global libc state is involved
        #[link_name = "crypt_r"]
        fn __crypt_r(
            key: *const libc::c_char,
            salt: *const libc::c_char,
            data: *mut crypt_data,
        ) -> *mut libc::c_char;
    }

    // SAFETY: crypt_data consists only of c_char arrays and a c_char
    // flag, so the all-zero bit pattern is a valid (NUL-terminated) value.
    let mut data: crypt_data = unsafe { std::mem::zeroed() };
    // copy at most len-1 bytes so the trailing NUL from zeroing survives
    for (i, c) in salt.iter().take(data.setting.len() - 1).enumerate() {
        data.setting[i] = *c as libc::c_char;
    }
    for (i, c) in password.iter().take(data.input.len() - 1).enumerate() {
        data.input[i] = *c as libc::c_char;
    }

    let res = unsafe {
        // SAFETY: `data.input` and `data.setting` are NUL-terminated C
        // strings inside `data`, which stays alive for the whole call;
        // on success the result is read out of `data.output` (the
        // returned pointer is only checked for NULL).
        let status = __crypt_r(
            &data.input as *const _,
            &data.setting as *const _,
            &mut data as *mut _,
        );
        if status.is_null() {
            bail!("internal error: crypt_r returned null pointer");
        }
        CStr::from_ptr(&data.output as *const _)
    };
    // crypt output is ASCII, so UTF-8 conversion is expected to succeed
    Ok(String::from(res.to_str()?))
}
/// Hash a password for storage using SHA-256 crypt (`$5$`) with a
/// freshly generated 8-byte random salt.
pub fn encrypt_pw(password: &str) -> Result<String, Error> {
    let raw_salt = proxmox::sys::linux::random_data(8)?;
    let setting = format!("$5${}$", base64::encode_config(&raw_salt, base64::CRYPT));
    crypt(password.as_bytes(), setting.as_bytes())
}
/// Check a plain-text password against a stored crypt hash.
///
/// The stored hash doubles as the setting string, so re-running `crypt`
/// with it must reproduce the identical hash.
pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error> {
    // NOTE(review): this is not a constant-time comparison - confirm a
    // timing side channel is acceptable for this use case.
    if crypt(password.as_bytes(), enc_password.as_bytes())? == enc_password {
        Ok(())
    } else {
        bail!("invalid credentials");
    }
}

View File

@ -4,23 +4,19 @@ pub mod broadcast_future;
// Public utility modules of this crate.
// NOTE: keep this list alphabetically sorted.
pub mod cert;
pub mod cli;
pub mod compression;
pub mod crypt;
pub mod crypt_config;
pub mod format;
pub mod fs;
pub mod io;
pub mod json;
pub mod logrotate;
pub mod lru_cache;
pub mod nom;
pub mod percent_encoding;
pub mod process_locker;
pub mod sha;
pub mod str;
pub mod stream;
pub mod sync;
pub mod sys;
pub mod task;
pub mod ticket;
pub mod tokio;
pub mod xattr;

View File

@ -1,239 +0,0 @@
use std::path::{Path, PathBuf};
use std::fs::{File, rename};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::io::Read;
use anyhow::{bail, format_err, Error};
use nix::unistd;
use proxmox::tools::fs::{CreateOptions, make_tmp_file};
/// Used for rotating log files and iterating over them
pub struct LogRotate {
    /// Path of the current (unrotated) log file; rotated files get a
    /// `.<N>` (and optionally `.zst`) suffix appended to this.
    base_path: PathBuf,
    /// Whether rotated files are zstd-compressed, and whether `.zst`
    /// variants are searched for when iterating.
    compress: bool,
    /// User logs should be reowned to.
    owner: Option<String>,
}
impl LogRotate {
    /// Creates a new instance if the path given is a valid file name (iow. does not end with ..)
    /// 'compress' decides if compressed files will be created on rotation, and if it will search
    /// '.zst' files when iterating
    ///
    /// By default, newly created files will be owned by the backup user. See [`new_with_user`] for
    /// a way to opt out of this behavior.
    pub fn new<P: AsRef<Path>>(
        path: P,
        compress: bool,
    ) -> Option<Self> {
        Self::new_with_user(path, compress, Some(pbs_buildcfg::BACKUP_USER_NAME.to_owned()))
    }

    /// See [`new`]. Additionally this also takes a user which should by default be used to reown
    /// new files to.
    pub fn new_with_user<P: AsRef<Path>>(
        path: P,
        compress: bool,
        owner: Option<String>,
    ) -> Option<Self> {
        // A path without a final file-name component cannot have rotation
        // suffixes appended, so reject it here.
        if path.as_ref().file_name().is_some() {
            Some(Self {
                base_path: path.as_ref().to_path_buf(),
                compress,
                owner,
            })
        } else {
            None
        }
    }

    /// Returns an iterator over the logrotated file names that exist
    pub fn file_names(&self) -> LogRotateFileNames {
        LogRotateFileNames {
            base_path: self.base_path.clone(),
            count: 0,
            compress: self.compress
        }
    }

    /// Returns an iterator over the logrotated file handles
    pub fn files(&self) -> LogRotateFiles {
        LogRotateFiles {
            file_names: self.file_names(),
        }
    }

    /// zstd-compress `source_path` into `target_path` (written via a temp
    /// file renamed into place) and unlink the source on success.
    fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
        let mut source = File::open(source_path)?;
        let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
        let target = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
        let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
            Ok(encoder) => encoder,
            Err(err) => {
                // clean up the temp file before bailing out
                let _ = unistd::unlink(&tmp_path);
                bail!("creating zstd encoder failed - {}", err);
            }
        };

        if let Err(err) = std::io::copy(&mut source, &mut encoder) {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd encoding failed for file {:?} - {}", target_path, err);
        }

        // finish() flushes the zstd frame; without it the file is truncated
        if let Err(err) = encoder.finish() {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd finish failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = rename(&tmp_path, target_path) {
            let _ = unistd::unlink(&tmp_path);
            bail!("rename failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = unistd::unlink(source_path) {
            bail!("unlink failed for file {:?} - {}", source_path, err);
        }

        Ok(())
    }

    /// Rotates the files up to 'max_files'
    /// if the 'compress' option was given it will compress the newest file
    ///
    /// e.g. rotates
    /// foo.2.zst => foo.3.zst
    /// foo.1 => foo.2.zst
    /// foo => foo.1
    pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
        let mut filenames: Vec<PathBuf> = self.file_names().collect();
        if filenames.is_empty() {
            return Ok(()); // no file means nothing to rotate
        }
        let count = filenames.len() + 1;

        // Target name the oldest existing file will be shifted to.
        let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
        next_filename.push(format!(".{}", filenames.len()));
        if self.compress && count > 2 {
            next_filename.push(".zst");
        }
        filenames.push(PathBuf::from(next_filename));

        // Shift from oldest to newest so nothing is overwritten; compress
        // exactly at the boundary where an uncompressed file moves to a
        // `.zst` target.
        for i in (0..count-1).rev() {
            if self.compress
                && filenames[i].extension() != Some(std::ffi::OsStr::new("zst"))
                && filenames[i+1].extension() == Some(std::ffi::OsStr::new("zst"))
            {
                Self::compress(&filenames[i], &filenames[i+1], &options)?;
            } else {
                rename(&filenames[i], &filenames[i+1])?;
            }
        }

        // Removal failures are only logged - the rotation itself already
        // succeeded at this point.
        if let Some(max_files) = max_files {
            for file in filenames.iter().skip(max_files) {
                if let Err(err) = unistd::unlink(file) {
                    eprintln!("could not remove {:?}: {}", &file, err);
                }
            }
        }

        Ok(())
    }

    /// Rotate if the current log file exceeds `max_size` bytes.
    ///
    /// Returns `Ok(true)` if a rotation took place, `Ok(false)` if the
    /// base file does not exist or is still within the size limit.
    /// Without explicit `options`, new files are owned by `self.owner`
    /// (if set), falling back to default create options.
    pub fn rotate(
        &mut self,
        max_size: u64,
        options: Option<CreateOptions>,
        max_files: Option<usize>
    ) -> Result<bool, Error> {
        let options = match options {
            Some(options) => options,
            None => match self.owner.as_deref() {
                Some(owner) => {
                    let user = crate::sys::query_user(owner)?
                        .ok_or_else(|| {
                            format_err!("failed to lookup owning user '{}' for logs", owner)
                        })?;
                    CreateOptions::new().owner(user.uid).group(user.gid)
                }
                None => CreateOptions::new(),
            }
        };

        let metadata = match self.base_path.metadata() {
            Ok(metadata) => metadata,
            // missing log file is not an error - simply nothing to rotate
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
            Err(err) => bail!("unable to open task archive - {}", err),
        };

        if metadata.len() > max_size {
            self.do_rotate(options, max_files)?;
            Ok(true)
        } else {
            Ok(false)
        }
    }
}
/// Iterator over logrotated file names
pub struct LogRotateFileNames {
    /// Path of the base (unrotated) log file.
    base_path: PathBuf,
    /// Next rotation index to probe; 0 means the base file itself.
    count: usize,
    /// Also probe for `.zst` variants of rotated files.
    compress: bool,
}
impl Iterator for LogRotateFileNames {
    type Item = PathBuf;

    /// Yield the base file first (if it exists), then `<base>.1`,
    /// `<base>.2`, ... - also accepting a `.zst` suffix when `compress`
    /// is set. Iteration stops at the first missing file.
    fn next(&mut self) -> Option<Self::Item> {
        // first call probes the base file itself
        if self.count == 0 {
            if self.base_path.is_file() {
                self.count += 1;
                return Some(self.base_path.to_path_buf());
            }
            return None;
        }

        let mut candidate: std::ffi::OsString = self.base_path.clone().into();
        candidate.push(format!(".{}", self.count));
        self.count += 1;

        if Path::new(&candidate).is_file() {
            return Some(candidate.into());
        }
        if !self.compress {
            return None;
        }

        // fall back to the compressed variant of this rotation index
        candidate.push(".zst");
        if Path::new(&candidate).is_file() {
            Some(candidate.into())
        } else {
            None
        }
    }
}
/// Iterator over logrotated files by returning a boxed reader
pub struct LogRotateFiles {
    /// Underlying name iterator; each yielded path is opened, with
    /// `.zst` files wrapped in a zstd decoder.
    file_names: LogRotateFileNames,
}
impl Iterator for LogRotateFiles {
    type Item = Box<dyn Read + Send>;

    /// Open the next existing log file; `.zst` files are transparently
    /// wrapped in a zstd decoder. Open/decoder errors end the iteration.
    fn next(&mut self) -> Option<Self::Item> {
        let path = self.file_names.next()?;
        let file = File::open(&path).ok()?;

        match path.extension() {
            Some(ext) if ext == "zst" => {
                let decoder = zstd::stream::read::Decoder::new(file).ok()?;
                Some(Box::new(decoder))
            }
            _ => Some(Box::new(file)),
        }
    }
}

View File

@ -1,211 +0,0 @@
//! Inter-process reader-writer lock builder.
//!
//! This implementation uses fcntl record locks with non-blocking
//! F_SETLK command (never blocks).
//!
//! We maintain a map of shared locks with time stamps, so you can get
//! the timestamp for the oldest open lock with
//! `oldest_shared_lock()`.
use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
// fixme: use F_OFD_ locks when implemented with nix::fcntl
// Note: flock lock conversion is not atomic, so we need to use fcntl
/// Inter-process reader-writer lock
pub struct ProcessLocker {
    /// Lock file all fcntl record locks are taken on.
    file: std::fs::File,
    /// Whether this process currently holds the exclusive lock.
    exclusive: bool,
    // NOTE(review): despite the name, this counts live *shared* guards
    // in this process (incremented in try_shared_lock).
    writers: usize,
    /// Id handed out to the next shared guard.
    next_guard_id: u64,
    shared_guard_list: HashMap<u64, i64>, // guard_id => timestamp
}
/// Lock guard for shared locks
///
/// Release the lock when it goes out of scope.
pub struct ProcessLockSharedGuard {
    /// Key into the locker's `shared_guard_list` timestamp map.
    guard_id: u64,
    /// Handle back to the owning locker, used on drop.
    locker: Arc<Mutex<ProcessLocker>>,
}
impl Drop for ProcessLockSharedGuard {
    /// Unregister this guard; the last shared guard of the process
    /// releases the OS-level record lock (unless an exclusive lock is
    /// also held by this process).
    fn drop(&mut self) {
        let mut data = self.locker.lock().unwrap();

        // a live guard implies at least one registered shared holder
        if data.writers == 0 {
            panic!("unexpected ProcessLocker state");
        }

        data.shared_guard_list.remove(&self.guard_id);

        if data.writers == 1 && !data.exclusive {
            // release the fcntl record lock covering the whole file
            let op = libc::flock {
                l_type: libc::F_UNLCK as i16,
                l_whence: libc::SEEK_SET as i16,
                l_start: 0,
                l_len: 0,
                l_pid: 0,
            };

            if let Err(err) =
                nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
            {
                // failing to unlock leaves the map inconsistent - abort
                panic!("unable to drop writer lock - {}", err);
            }
        }
        if data.writers > 0 {
            data.writers -= 1;
        }
    }
}
/// Lock guard for exclusive locks
///
/// Release the lock when it goes out of scope.
pub struct ProcessLockExclusiveGuard {
    /// Handle back to the owning locker, used on drop.
    locker: Arc<Mutex<ProcessLocker>>,
}
impl Drop for ProcessLockExclusiveGuard {
    /// Release the exclusive lock; if shared guards still exist in this
    /// process, downgrade the record lock to a read lock instead of
    /// releasing it entirely.
    fn drop(&mut self) {
        let mut data = self.locker.lock().unwrap();

        // a live exclusive guard implies the exclusive flag is set
        if !data.exclusive {
            panic!("unexpected ProcessLocker state");
        }

        // `writers` counts shared guards - keep a read lock for them
        let ltype = if data.writers != 0 {
            libc::F_RDLCK
        } else {
            libc::F_UNLCK
        };

        let op = libc::flock {
            l_type: ltype as i16,
            l_whence: libc::SEEK_SET as i16,
            l_start: 0,
            l_len: 0,
            l_pid: 0,
        };

        if let Err(err) =
            nix::fcntl::fcntl(data.file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLKW(&op))
        {
            // failing to unlock leaves the state inconsistent - abort
            panic!("unable to drop exclusive lock - {}", err);
        }

        data.exclusive = false;
    }
}
impl ProcessLocker {
    /// Create a new instance for the specified file.
    ///
    /// This simply creates the file if it does not exist.
    pub fn new<P: AsRef<std::path::Path>>(lockfile: P) -> Result<Arc<Mutex<Self>>, Error> {
        let file = std::fs::OpenOptions::new()
            .create(true)
            .read(true)
            .write(true)
            .open(lockfile)?;

        Ok(Arc::new(Mutex::new(Self {
            file,
            exclusive: false,
            writers: 0,
            next_guard_id: 0,
            shared_guard_list: HashMap::new(),
        })))
    }

    /// Take a non-blocking (`F_SETLK`) fcntl record lock of the given
    /// type (`F_RDLCK`/`F_WRLCK`/`F_UNLCK`) covering the whole file.
    fn try_lock(file: &std::fs::File, ltype: i32) -> Result<(), Error> {
        let op = libc::flock {
            l_type: ltype as i16,
            l_whence: libc::SEEK_SET as i16,
            l_start: 0,
            l_len: 0,
            l_pid: 0,
        };

        nix::fcntl::fcntl(file.as_raw_fd(), nix::fcntl::FcntlArg::F_SETLK(&op))?;

        Ok(())
    }

    /// Try to acquire a shared lock
    ///
    /// On success, this makes sure that no other process can get an exclusive lock for the file.
    pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {
        let mut data = locker.lock().unwrap();

        // only the first shared guard needs to take the OS-level lock;
        // further guards just bump the refcount
        if data.writers == 0 && !data.exclusive {
            if let Err(err) = Self::try_lock(&data.file, libc::F_RDLCK) {
                bail!("unable to get shared lock - {}", err);
            }
        }

        data.writers += 1;

        let guard = ProcessLockSharedGuard {
            locker: locker.clone(),
            guard_id: data.next_guard_id,
        };
        data.next_guard_id += 1;

        // record the acquisition timestamp for oldest_shared_lock()
        let now = unsafe { libc::time(std::ptr::null_mut()) };
        data.shared_guard_list.insert(guard.guard_id, now);

        Ok(guard)
    }

    /// Get oldest shared lock timestamp
    ///
    /// Returns `None` when no shared lock is currently held.
    pub fn oldest_shared_lock(locker: Arc<Mutex<Self>>) -> Option<i64> {
        // idiomatic replacement for the previous manual min-fold
        locker
            .lock()
            .unwrap()
            .shared_guard_list
            .values()
            .copied()
            .min()
    }

    /// Try to acquire an exclusive lock
    ///
    /// Make sure we are the only process which has locks for this file (shared or exclusive).
    pub fn try_exclusive_lock(
        locker: Arc<Mutex<Self>>,
    ) -> Result<ProcessLockExclusiveGuard, Error> {
        let mut data = locker.lock().unwrap();

        if data.exclusive {
            bail!("already locked exclusively");
        }

        if let Err(err) = Self::try_lock(&data.file, libc::F_WRLCK) {
            bail!("unable to get exclusive lock - {}", err);
        }

        data.exclusive = true;

        Ok(ProcessLockExclusiveGuard {
            locker: locker.clone(),
        })
    }
}

View File

@ -1,92 +0,0 @@
use anyhow::{bail, Error};
/// Worker task abstraction
///
/// A worker task is a long running task, which usually logs output into a separate file.
pub trait WorkerTaskContext: Send + Sync {
    /// Test if there was a request to abort the task.
    fn abort_requested(&self) -> bool;

    /// If the task should be aborted, this should fail with a reasonable error message.
    fn check_abort(&self) -> Result<(), Error> {
        if self.abort_requested() {
            bail!("abort requested - aborting task");
        }
        Ok(())
    }

    /// Test if there was a request to shutdown the server.
    fn shutdown_requested(&self) -> bool;

    /// This should fail with a reasonable error message if there was
    /// a request to shutdown the server.
    fn fail_on_shutdown(&self) -> Result<(), Error> {
        if self.shutdown_requested() {
            bail!("Server shutdown requested - aborting task");
        }
        Ok(())
    }

    /// Create a log message for this task.
    ///
    /// Implementors decide where the message ends up; the `task_log!`
    /// family of macros routes through this method.
    fn log(&self, level: log::Level, message: &std::fmt::Arguments);
}
/// Convenience implementation: forward every trait method through the
/// `Arc` to the wrapped task context.
impl<T: WorkerTaskContext + ?Sized> WorkerTaskContext for std::sync::Arc<T> {
    fn abort_requested(&self) -> bool {
        (**self).abort_requested()
    }

    fn check_abort(&self) -> Result<(), Error> {
        (**self).check_abort()
    }

    fn shutdown_requested(&self) -> bool {
        (**self).shutdown_requested()
    }

    fn fail_on_shutdown(&self) -> Result<(), Error> {
        (**self).fail_on_shutdown()
    }

    fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
        (**self).log(level, message)
    }
}
/// Log an error-level message via [`WorkerTaskContext::log`].
#[macro_export]
macro_rules! task_error {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Error, &format_args!($($fmt)+))
    }};
}

/// Log a warn-level message via [`WorkerTaskContext::log`].
#[macro_export]
macro_rules! task_warn {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Warn, &format_args!($($fmt)+))
    }};
}

/// Log an info-level message via [`WorkerTaskContext::log`].
#[macro_export]
macro_rules! task_log {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Info, &format_args!($($fmt)+))
    }};
}

/// Log a debug-level message via [`WorkerTaskContext::log`].
#[macro_export]
macro_rules! task_debug {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Debug, &format_args!($($fmt)+))
    }};
}

/// Log a trace-level message via [`WorkerTaskContext::log`].
#[macro_export]
macro_rules! task_trace {
    ($task:expr, $($fmt:tt)+) => {{
        $crate::task::WorkerTaskContext::log(&*$task, log::Level::Trace, &format_args!($($fmt)+))
    }};
}