split proxmox-file-restore into its own crate

This also moves a couple of required utilities such as
logrotate and some file descriptor methods to pbs-tools.

Note that the logrotate usage and the run-dir handling should be
improved so that they work as a regular user, since this *should*
(IMHO) be a regular unprivileged command (including running
qemu, given the kvm privileges...)

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
This commit is contained in:
Wolfgang Bumiller
2021-09-01 12:21:51 +02:00
parent e5f9b7f79e
commit 6c76aa434d
23 changed files with 182 additions and 79 deletions

View File

@ -1,73 +0,0 @@
//! Provides a very basic "newc" format cpio encoder.
//! See 'man 5 cpio' for format details, as well as:
//! https://www.kernel.org/doc/html/latest/driver-api/early-userspace/buffer-format.html
//! This does not provide full support for the format, only what is needed to include files in an
//! initramfs intended for a linux kernel.
use anyhow::{bail, Error};
use std::ffi::{CString, CStr};
use tokio::io::{copy, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
/// Write a cpio file entry to an AsyncWrite.
/// Write a single cpio "newc" file entry to an `AsyncWrite`.
///
/// Emits the fixed 110-byte header (magic `070701` plus 13 eight-digit
/// hex fields), the NUL-terminated `name` padded to a 4-byte boundary,
/// and exactly `size` bytes read from `content`, also padded to a
/// 4-byte boundary.
///
/// # Errors
///
/// Fails if any write to `target` fails, or if `content` yields fewer
/// than `size` bytes.
pub async fn append_file<W: AsyncWrite + Unpin, R: AsyncRead + Unpin>(
    mut target: W,
    content: R,
    name: &CStr,
    inode: u16,
    mode: u16,
    uid: u16,
    gid: u16,
    // negative mtimes are generally valid, but cpio defines all fields as unsigned
    mtime: u64,
    // c_filesize has 8 bytes, but man page claims that 4 GB files are the maximum, let's be safe
    size: u32,
) -> Result<(), Error> {
    let name = name.to_bytes_with_nul();

    target.write_all(b"070701").await?; // c_magic
    print_cpio_hex(&mut target, inode as u64).await?; // c_ino
    print_cpio_hex(&mut target, mode as u64).await?; // c_mode
    print_cpio_hex(&mut target, uid as u64).await?; // c_uid
    print_cpio_hex(&mut target, gid as u64).await?; // c_gid
    print_cpio_hex(&mut target, 0).await?; // c_nlink
    print_cpio_hex(&mut target, mtime as u64).await?; // c_mtime
    print_cpio_hex(&mut target, size as u64).await?; // c_filesize
    print_cpio_hex(&mut target, 0).await?; // c_devmajor
    print_cpio_hex(&mut target, 0).await?; // c_devminor
    print_cpio_hex(&mut target, 0).await?; // c_rdevmajor
    print_cpio_hex(&mut target, 0).await?; // c_rdevminor
    print_cpio_hex(&mut target, name.len() as u64).await?; // c_namesize
    print_cpio_hex(&mut target, 0).await?; // c_check (ignored for newc)
    target.write_all(name).await?;

    // Pad so that header + name together end on a 4-byte boundary:
    // 6 bytes of magic plus 13 hex fields of 8 bytes each, then the name.
    let header_size = 6 + 8 * 13 + name.len();
    let mut name_pad = header_size;
    while name_pad & 3 != 0 {
        target.write_u8(0).await?;
        name_pad += 1;
    }

    // Cap the reader at `size` so an over-long source can never corrupt
    // the archive; a short source is detected below instead.
    let mut content = content.take(size as u64);
    let copied = copy(&mut content, &mut target).await?;
    if copied < size as u64 {
        bail!("cpio: not enough data, or size too big - encoding invalid");
    }

    // The file data is padded to a 4-byte boundary as well.
    let mut data_pad = copied;
    while data_pad & 3 != 0 {
        target.write_u8(0).await?;
        data_pad += 1;
    }

    Ok(())
}
/// Write the TRAILER!!! file to an AsyncWrite, signifying the end of a cpio archive. Note that you
/// can immediately add more files after, to create a concatenated archive, the kernel for example
/// will merge these upon loading an initramfs.
/// Write the `TRAILER!!!` entry that terminates a cpio archive.
///
/// Further entries may still be appended afterwards to form a
/// concatenated archive; the kernel merges such archives when loading
/// an initramfs.
pub async fn append_trailer<W: AsyncWrite + Unpin>(target: W) -> Result<(), Error> {
    let trailer_name = CString::new("TRAILER!!!").unwrap();
    let no_content = tokio::io::empty();
    append_file(target, no_content, &trailer_name, 0, 0, 0, 0, 0, 0).await
}
/// Write `value` as the 8-digit zero-padded lowercase hex field used by
/// every numeric entry of the "newc" cpio header.
async fn print_cpio_hex<W: AsyncWrite + Unpin>(target: &mut W, value: u64) -> Result<(), Error> {
    let field = format!("{:08x}", value);
    target.write_all(field.as_bytes()).await?;
    Ok(())
}

View File

@ -1,214 +0,0 @@
use std::path::{Path, PathBuf};
use std::fs::{File, rename};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::io::Read;
use anyhow::{bail, Error};
use nix::unistd;
use proxmox::tools::fs::{CreateOptions, make_tmp_file};
/// Used for rotating log files and iterating over them
pub struct LogRotate {
    // Path of the active (un-rotated) log file; rotated copies live at
    // "<base_path>.<N>" and, when compression is on, "<base_path>.<N>.zst".
    base_path: PathBuf,
    // Whether rotation compresses rotated files and iteration also
    // looks for '.zst' variants.
    compress: bool,
}
impl LogRotate {
    /// Creates a new instance if the path given is a valid file name
    /// (iow. does not end with "..").
    ///
    /// 'compress' decides if compressed files will be created on
    /// rotation, and if it will search '.zst' files when iterating.
    pub fn new<P: AsRef<Path>>(path: P, compress: bool) -> Option<Self> {
        if path.as_ref().file_name().is_some() {
            Some(Self {
                base_path: path.as_ref().to_path_buf(),
                compress,
            })
        } else {
            None
        }
    }

    /// Returns an iterator over the logrotated file names that exist
    pub fn file_names(&self) -> LogRotateFileNames {
        LogRotateFileNames {
            base_path: self.base_path.clone(),
            count: 0,
            compress: self.compress
        }
    }

    /// Returns an iterator over the logrotated file handles
    pub fn files(&self) -> LogRotateFiles {
        LogRotateFiles {
            file_names: self.file_names(),
        }
    }

    // Compress `source_path` into `target_path` with zstd and remove the
    // source afterwards. The data is written to a temp file first and
    // renamed into place so a partially written archive never appears
    // under the final name; on failure the temp file is unlinked
    // (best-effort) before bailing.
    fn compress(source_path: &PathBuf, target_path: &PathBuf, options: &CreateOptions) -> Result<(), Error> {
        let mut source = File::open(source_path)?;
        let (fd, tmp_path) = make_tmp_file(target_path, options.clone())?;
        // SAFETY: `fd` is a freshly created, exclusively owned descriptor;
        // `into_raw_fd` transfers its ownership to the new `File`.
        let target = unsafe { File::from_raw_fd(fd.into_raw_fd()) };
        // level 0 selects zstd's default compression level
        let mut encoder = match zstd::stream::write::Encoder::new(target, 0) {
            Ok(encoder) => encoder,
            Err(err) => {
                let _ = unistd::unlink(&tmp_path);
                bail!("creating zstd encoder failed - {}", err);
            }
        };

        if let Err(err) = std::io::copy(&mut source, &mut encoder) {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd encoding failed for file {:?} - {}", target_path, err);
        }

        // finish() flushes the zstd frame footer; skipping it would leave
        // a truncated archive.
        if let Err(err) = encoder.finish() {
            let _ = unistd::unlink(&tmp_path);
            bail!("zstd finish failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = rename(&tmp_path, target_path) {
            let _ = unistd::unlink(&tmp_path);
            bail!("rename failed for file {:?} - {}", target_path, err);
        }

        if let Err(err) = unistd::unlink(source_path) {
            bail!("unlink failed for file {:?} - {}", source_path, err);
        }

        Ok(())
    }

    /// Rotates the files up to 'max_files'
    /// if the 'compress' option was given it will compress the newest file
    ///
    /// e.g. rotates
    /// foo.2.zst => foo.3.zst
    /// foo.1     => foo.2.zst
    /// foo       => foo.1
    pub fn do_rotate(&mut self, options: CreateOptions, max_files: Option<usize>) -> Result<(), Error> {
        let mut filenames: Vec<PathBuf> = self.file_names().collect();
        if filenames.is_empty() {
            return Ok(()); // no file means nothing to rotate
        }
        let count = filenames.len() + 1;

        // New highest-numbered slot every existing file shifts towards.
        // (filenames is non-empty, so the base file exists and
        // canonicalize() can resolve it.)
        let mut next_filename = self.base_path.clone().canonicalize()?.into_os_string();
        next_filename.push(format!(".{}", filenames.len()));
        // count > 2 means at least one rotation already exists: the plain
        // ".1" file is only compressed on the *next* rotation (see the
        // example above), so the very first rotated file stays plain.
        if self.compress && count > 2 {
            next_filename.push(".zst");
        }
        filenames.push(PathBuf::from(next_filename));

        // Shift highest-numbered files first so nothing is overwritten;
        // a plain source moving into a ".zst" slot gets compressed,
        // everything else is a simple rename.
        for i in (0..count-1).rev() {
            if self.compress
                && filenames[i].extension() != Some(std::ffi::OsStr::new("zst"))
                && filenames[i+1].extension() == Some(std::ffi::OsStr::new("zst"))
            {
                Self::compress(&filenames[i], &filenames[i+1], &options)?;
            } else {
                rename(&filenames[i], &filenames[i+1])?;
            }
        }

        // Prune rotations beyond the retention limit; removal failures
        // are reported but do not abort the rotation.
        if let Some(max_files) = max_files {
            for file in filenames.iter().skip(max_files) {
                if let Err(err) = unistd::unlink(file) {
                    eprintln!("could not remove {:?}: {}", &file, err);
                }
            }
        }

        Ok(())
    }

    /// Conditionally rotate: only if the base file exceeds `max_size`.
    ///
    /// Returns `Ok(true)` if a rotation happened, `Ok(false)` if the file
    /// is missing or still below the size limit. When no `options` are
    /// given, rotated files are owned by the backup user/group.
    pub fn rotate(
        &mut self,
        max_size: u64,
        options: Option<CreateOptions>,
        max_files: Option<usize>
    ) -> Result<bool, Error> {
        let options = match options {
            Some(options) => options,
            None => {
                let backup_user = crate::backup::backup_user()?;
                CreateOptions::new().owner(backup_user.uid).group(backup_user.gid)
            },
        };

        let metadata = match self.base_path.metadata() {
            Ok(metadata) => metadata,
            // a missing log file simply means there is nothing to rotate
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
            Err(err) => bail!("unable to open task archive - {}", err),
        };

        if metadata.len() > max_size {
            self.do_rotate(options, max_files)?;
            Ok(true)
        } else {
            Ok(false)
        }
    }
}
/// Iterator over logrotated file names
pub struct LogRotateFileNames {
    // path of the un-rotated base file
    base_path: PathBuf,
    // next rotation number to probe; 0 means the base file itself
    count: usize,
    // whether to also probe for '.zst' variants of rotated files
    compress: bool,
}

impl Iterator for LogRotateFileNames {
    type Item = PathBuf;

    fn next(&mut self) -> Option<Self::Item> {
        // The very first item is the un-rotated base file itself.
        if self.count == 0 {
            if !self.base_path.is_file() {
                return None;
            }
            self.count += 1;
            return Some(self.base_path.to_path_buf());
        }

        // Subsequent items are "<base>.<N>", falling back to
        // "<base>.<N>.zst" when compression is enabled.
        let mut candidate: std::ffi::OsString = self.base_path.clone().into();
        candidate.push(format!(".{}", self.count));
        self.count += 1;

        if Path::new(&candidate).is_file() {
            return Some(candidate.into());
        }
        if self.compress {
            candidate.push(".zst");
            if Path::new(&candidate).is_file() {
                return Some(candidate.into());
            }
        }
        None
    }
}
/// Iterator over logrotated files by returning a boxed reader
pub struct LogRotateFiles {
    // name iterator driving which file is opened next
    file_names: LogRotateFileNames,
}

impl Iterator for LogRotateFiles {
    type Item = Box<dyn Read + Send>;

    fn next(&mut self) -> Option<Self::Item> {
        let path = self.file_names.next()?;
        let file = File::open(&path).ok()?;

        // Compressed rotations are wrapped in a decoder so callers always
        // read plain log data.
        match path.extension() {
            Some(ext) if ext == std::ffi::OsStr::new("zst") => {
                let decoder = zstd::stream::read::Decoder::new(file).ok()?;
                Some(Box::new(decoder))
            }
            _ => Some(Box::new(file)),
        }
    }
}

View File

@ -8,7 +8,6 @@ use anyhow::{bail, format_err, Error};
use openssl::hash::{hash, DigestBytes, MessageDigest};
pub use proxmox::tools::fd::Fd;
use proxmox::tools::fs::{create_path, CreateOptions};
use proxmox_http::{
client::SimpleHttp,
@ -27,14 +26,12 @@ pub mod apt;
pub mod async_io;
pub mod compression;
pub mod config;
pub mod cpio;
pub mod daemon;
pub mod disks;
mod memcom;
pub use memcom::Memcom;
pub mod logrotate;
pub mod serde_filter;
pub mod statistics;
pub mod subscription;
@ -211,16 +208,3 @@ pub fn setup_safe_path_env() {
std::env::remove_var(name);
}
}
/// Create the base run-directory.
///
/// This exists to fixate the permissions for the run *base* directory while allowing intermediate
/// directories after it to have different permissions.
pub fn create_run_dir() -> Result<(), Error> {
    // the run base directory is owned by the backup user/group
    let backup_user = crate::backup::backup_user()?;
    let opts = CreateOptions::new()
        .owner(backup_user.uid)
        .group(backup_user.gid);
    // create_path reports whether it created anything; only success matters here
    let _: bool = create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), None, Some(opts))?;
    Ok(())
}