2020-09-28 13:32:04 +00:00
|
|
|
use std::path::{Path, PathBuf};
|
|
|
|
use std::fs::{File, rename};
|
2020-11-30 08:53:59 +00:00
|
|
|
use std::os::unix::io::{FromRawFd, IntoRawFd};
|
2020-09-28 13:32:04 +00:00
|
|
|
use std::io::Read;
|
|
|
|
|
2021-09-01 10:21:51 +00:00
|
|
|
use anyhow::{bail, format_err, Error};
|
2020-09-28 13:32:04 +00:00
|
|
|
use nix::unistd;
|
|
|
|
|
2020-10-20 08:05:51 +00:00
|
|
|
use proxmox::tools::fs::{CreateOptions, make_tmp_file};
|
2020-09-28 13:32:04 +00:00
|
|
|
|
|
|
|
/// Used for rotating log files and iterating over them
pub struct LogRotate {
    /// Path of the live (unrotated) log file; rotated siblings live next to
    /// it as `<base>.1`, `<base>.2.zst`, ...
    base_path: PathBuf,
    /// Whether rotated files are zstd-compressed, and whether iteration also
    /// probes for `.zst` variants.
    compress: bool,
    /// User logs should be reowned to.
    owner: Option<String>,
}
|
|
|
|
|
|
|
|
impl LogRotate {
|
2021-09-01 10:21:51 +00:00
|
|
|
/// Creates a new instance if the path given is a valid file name (iow. does not end with ..)
|
|
|
|
/// 'compress' decides if compresses files will be created on rotation, and if it will search
|
|
|
|
/// '.zst' files when iterating
|
|
|
|
///
|
|
|
|
/// By default, newly created files will be owned by the backup user. See [`new_with_user`] for
|
|
|
|
/// a way to opt out of this behavior.
|
|
|
|
pub fn new<P: AsRef<Path>>(
|
|
|
|
path: P,
|
|
|
|
compress: bool,
|
|
|
|
) -> Option<Self> {
|
|
|
|
Self::new_with_user(path, compress, Some(pbs_buildcfg::BACKUP_USER_NAME.to_owned()))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// See [`new`]. Additionally this also takes a user which should by default be used to reown
|
|
|
|
/// new files to.
|
|
|
|
pub fn new_with_user<P: AsRef<Path>>(
|
|
|
|
path: P,
|
|
|
|
compress: bool,
|
|
|
|
owner: Option<String>,
|
|
|
|
) -> Option<Self> {
|
2020-09-28 13:32:04 +00:00
|
|
|
if path.as_ref().file_name().is_some() {
|
|
|
|
Some(Self {
|
|
|
|
base_path: path.as_ref().to_path_buf(),
|
|
|
|
compress,
|
2021-09-01 10:21:51 +00:00
|
|
|
owner,
|
2020-09-28 13:32:04 +00:00
|
|
|
})
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an iterator over the logrotated file names that exist
|
|
|
|
pub fn file_names(&self) -> LogRotateFileNames {
|
|
|
|
LogRotateFileNames {
|
|
|
|
base_path: self.base_path.clone(),
|
|
|
|
count: 0,
|
|
|
|
compress: self.compress
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an iterator over the logrotated file handles
|
|
|
|
pub fn files(&self) -> LogRotateFiles {
|
|
|
|
LogRotateFiles {
|
|
|
|
file_names: self.file_names(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-28 09:58:00 +00:00
|
|
|
fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
|
|
|
|
let mut source = File::open(source_path)?;
|
|
|
|
let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
|
2020-11-30 08:53:59 +00:00
|
|
|
let target = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
|
2020-10-20 08:24:46 +00:00
|
|
|
let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
|
|
|
|
Ok(encoder) => encoder,
|
|
|
|
Err(err) => {
|
|
|
|
let _ = unistd::unlink(&tmp_path);
|
|
|
|
bail!("creating zstd encoder failed - {}", err);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if let Err(err) = std::io::copy(&mut source, &mut encoder) {
|
|
|
|
let _ = unistd::unlink(&tmp_path);
|
2020-10-28 09:58:00 +00:00
|
|
|
bail!("zstd encoding failed for file {:?} - {}", target_path, err);
|
2020-10-20 08:24:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if let Err(err) = encoder.finish() {
|
|
|
|
let _ = unistd::unlink(&tmp_path);
|
2020-10-28 09:58:00 +00:00
|
|
|
bail!("zstd finish failed for file {:?} - {}", target_path, err);
|
2020-10-20 08:24:46 +00:00
|
|
|
}
|
|
|
|
|
2020-10-28 09:58:00 +00:00
|
|
|
if let Err(err) = rename(&tmp_path, target_path) {
|
2020-10-20 08:24:46 +00:00
|
|
|
let _ = unistd::unlink(&tmp_path);
|
2020-10-28 09:58:00 +00:00
|
|
|
bail!("rename failed for file {:?} - {}", target_path, err);
|
2020-10-20 08:24:46 +00:00
|
|
|
}
|
2020-10-28 09:58:00 +00:00
|
|
|
|
|
|
|
if let Err(err) = unistd::unlink(source_path) {
|
|
|
|
bail!("unlink failed for file {:?} - {}", source_path, err);
|
|
|
|
}
|
|
|
|
|
2020-10-20 08:24:46 +00:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2020-09-28 13:32:04 +00:00
|
|
|
/// Rotates the files up to 'max_files'
|
|
|
|
/// if the 'compress' option was given it will compress the newest file
|
|
|
|
///
|
|
|
|
/// e.g. rotates
|
|
|
|
/// foo.2.zst => foo.3.zst
|
2020-10-28 09:58:00 +00:00
|
|
|
/// foo.1 => foo.2.zst
|
|
|
|
/// foo => foo.1
|
2020-10-20 08:32:15 +00:00
|
|
|
pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
|
2020-09-28 13:32:04 +00:00
|
|
|
let mut filenames: Vec<PathBuf> = self.file_names().collect();
|
|
|
|
if filenames.is_empty() {
|
|
|
|
return Ok(()); // no file means nothing to rotate
|
|
|
|
}
|
2020-11-04 16:15:11 +00:00
|
|
|
let count = filenames.len() + 1;
|
2020-09-28 13:32:04 +00:00
|
|
|
|
|
|
|
let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
|
2020-10-28 09:58:00 +00:00
|
|
|
next_filename.push(format!(".{}", filenames.len()));
|
2020-11-04 16:15:11 +00:00
|
|
|
if self.compress && count > 2 {
|
2020-11-02 17:31:31 +00:00
|
|
|
next_filename.push(".zst");
|
|
|
|
}
|
2020-09-28 13:32:04 +00:00
|
|
|
|
|
|
|
filenames.push(PathBuf::from(next_filename));
|
|
|
|
|
log rotate: do NOT compress first rotation
The first rotation is normally the one still opened by one or more
processes for writing, so it must NOT be replaced, removed, ..., as
this then makes the remaining logging, until those processes are
noticed that they should reopen the logfile due to rotation, goes
into nirvana, which is far from ideal for a log.
Only rotating (renaming) is OK for this active file, as this does not
invalidates the file and keeps open FDs intact.
So start compressing with the second rotation, which should be clear
to use, as all writers must have been told to reopen the log during
the last rotation, reopen is a fast operation and normally triggered
at least day ago (at least if one did not dropped the state file
manually), so we are fine to archive that one for real.
If we plan to allow faster rotation the whole rotation+reopen should
be locked, so that we can guarantee that all writers switched over,
but this is unlikely to be needed.
Again, this is was logrotate sanely does by default since forever.
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2020-10-20 08:26:28 +00:00
|
|
|
for i in (0..count-1).rev() {
|
2020-11-02 17:31:31 +00:00
|
|
|
if self.compress
|
2021-01-19 13:12:07 +00:00
|
|
|
&& filenames[i].extension() != Some(std::ffi::OsStr::new("zst"))
|
2021-01-19 13:04:46 +00:00
|
|
|
&& filenames[i+1].extension() == Some(std::ffi::OsStr::new("zst"))
|
2020-11-02 17:31:31 +00:00
|
|
|
{
|
|
|
|
Self::compress(&filenames[i], &filenames[i+1], &options)?;
|
|
|
|
} else {
|
|
|
|
rename(&filenames[i], &filenames[i+1])?;
|
2020-09-28 13:32:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(max_files) = max_files {
|
|
|
|
for file in filenames.iter().skip(max_files) {
|
|
|
|
if let Err(err) = unistd::unlink(file) {
|
|
|
|
eprintln!("could not remove {:?}: {}", &file, err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
2020-10-20 08:32:15 +00:00
|
|
|
|
|
|
|
pub fn rotate(
|
|
|
|
&mut self,
|
|
|
|
max_size: u64,
|
|
|
|
options: Option<CreateOptions>,
|
|
|
|
max_files: Option<usize>
|
|
|
|
) -> Result<bool, Error> {
|
|
|
|
|
|
|
|
let options = match options {
|
|
|
|
Some(options) => options,
|
2021-09-01 10:21:51 +00:00
|
|
|
None => match self.owner.as_deref() {
|
|
|
|
Some(owner) => {
|
|
|
|
let user = crate::sys::query_user(owner)?
|
|
|
|
.ok_or_else(|| {
|
|
|
|
format_err!("failed to lookup owning user '{}' for logs", owner)
|
|
|
|
})?;
|
|
|
|
CreateOptions::new().owner(user.uid).group(user.gid)
|
|
|
|
}
|
|
|
|
None => CreateOptions::new(),
|
|
|
|
}
|
2020-10-20 08:32:15 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
let metadata = match self.base_path.metadata() {
|
|
|
|
Ok(metadata) => metadata,
|
|
|
|
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
|
|
|
|
Err(err) => bail!("unable to open task archive - {}", err),
|
|
|
|
};
|
|
|
|
|
|
|
|
if metadata.len() > max_size {
|
|
|
|
self.do_rotate(options, max_files)?;
|
|
|
|
Ok(true)
|
|
|
|
} else {
|
|
|
|
Ok(false)
|
|
|
|
}
|
|
|
|
}
|
2020-09-28 13:32:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Iterator over logrotated file names
pub struct LogRotateFileNames {
    /// Path of the base (unrotated) log file.
    base_path: PathBuf,
    /// Rotation index probed next; 0 means the base file itself.
    count: usize,
    /// Whether to also probe for `.zst` variants of rotated files.
    compress: bool,
}
|
|
|
|
|
|
|
|
impl Iterator for LogRotateFileNames {
|
|
|
|
type Item = PathBuf;
|
|
|
|
|
|
|
|
fn next(&mut self) -> Option<Self::Item> {
|
|
|
|
if self.count > 0 {
|
|
|
|
let mut path: std::ffi::OsString = self.base_path.clone().into();
|
|
|
|
|
|
|
|
path.push(format!(".{}", self.count));
|
|
|
|
self.count += 1;
|
|
|
|
|
|
|
|
if Path::new(&path).is_file() {
|
|
|
|
Some(path.into())
|
|
|
|
} else if self.compress {
|
|
|
|
path.push(".zst");
|
|
|
|
if Path::new(&path).is_file() {
|
|
|
|
Some(path.into())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
} else if self.base_path.is_file() {
|
|
|
|
self.count += 1;
|
|
|
|
Some(self.base_path.to_path_buf())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Iterator over logrotated files by returning a boxed reader
pub struct LogRotateFiles {
    /// Supplies the file names to open, in rotation order.
    file_names: LogRotateFileNames,
}
|
|
|
|
|
|
|
|
impl Iterator for LogRotateFiles {
|
|
|
|
type Item = Box<dyn Read + Send>;
|
|
|
|
|
|
|
|
fn next(&mut self) -> Option<Self::Item> {
|
|
|
|
let filename = self.file_names.next()?;
|
|
|
|
let file = File::open(&filename).ok()?;
|
|
|
|
|
2021-01-19 13:04:46 +00:00
|
|
|
if filename.extension() == Some(std::ffi::OsStr::new("zst")) {
|
2020-09-28 13:32:04 +00:00
|
|
|
let encoder = zstd::stream::read::Decoder::new(file).ok()?;
|
|
|
|
return Some(Box::new(encoder));
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(Box::new(file))
|
|
|
|
}
|
|
|
|
}
|