move client to pbs-client subcrate
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
This commit is contained in:
1079
pbs-client/src/pxar/create.rs
Normal file
1079
pbs-client/src/pxar/create.rs
Normal file
File diff suppressed because it is too large
Load Diff
162
pbs-client/src/pxar/dir_stack.rs
Normal file
162
pbs-client/src/pxar/dir_stack.rs
Normal file
@ -0,0 +1,162 @@
|
||||
use std::ffi::OsString;
|
||||
use std::os::unix::io::{AsRawFd, RawFd};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::{mkdirat, Mode};
|
||||
|
||||
use proxmox::sys::error::SysError;
|
||||
use proxmox::tools::fd::BorrowedFd;
|
||||
use pxar::Metadata;
|
||||
|
||||
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
|
||||
|
||||
/// A directory entry being tracked during extraction.
///
/// Holds the entry's file name and pxar metadata; the on-disk directory
/// handle (`dir`) is only filled in lazily once the directory has actually
/// been created/opened (see `create_dir`/`open_dir`).
pub struct PxarDir {
    // name of this directory relative to its parent (single path component)
    file_name: OsString,
    // pxar metadata to apply to this directory once we leave it
    metadata: Metadata,
    // `Some` once the directory exists on disk and has been opened
    dir: Option<Dir>,
}

impl PxarDir {
    /// Track a directory which has not been created on disk yet.
    pub fn new(file_name: OsString, metadata: Metadata) -> Self {
        Self {
            file_name,
            metadata,
            dir: None,
        }
    }

    /// Track an already opened directory handle (used for the extraction
    /// root); its file name is recorded as `"."`.
    pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
        Self {
            file_name: OsString::from("."),
            metadata,
            dir: Some(dir),
        }
    }

    /// Create this directory inside `parent` via `mkdirat`, then open it.
    ///
    /// With `allow_existing_dirs` set, an already existing directory is not
    /// treated as an error; any other `mkdirat` failure is propagated.
    fn create_dir(
        &mut self,
        parent: RawFd,
        allow_existing_dirs: bool,
    ) -> Result<BorrowedFd, Error> {
        match mkdirat(
            parent,
            self.file_name.as_os_str(),
            perms_from_metadata(&self.metadata)?,
        ) {
            Ok(()) => (),
            Err(err) => {
                if !(allow_existing_dirs && err.already_exists()) {
                    return Err(err.into());
                }
            }
        }

        self.open_dir(parent)
    }

    /// Open this directory relative to `parent`, store the handle in `self`
    /// and hand back a borrowed fd to it.
    fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
        let dir = Dir::openat(
            parent,
            self.file_name.as_os_str(),
            OFlag::O_DIRECTORY,
            Mode::empty(),
        )?;

        // borrow the fd before moving `dir` into `self`
        let fd = BorrowedFd::new(&dir);
        self.dir = Some(dir);

        Ok(fd)
    }

    /// Borrow the underlying directory fd, if the directory was opened.
    pub fn try_as_borrowed_fd(&self) -> Option<BorrowedFd> {
        self.dir.as_ref().map(BorrowedFd::new)
    }

    /// The pxar metadata recorded for this directory.
    pub fn metadata(&self) -> &Metadata {
        &self.metadata
    }
}
|
||||
|
||||
/// Stack of directories entered while walking a pxar archive.
///
/// Directories are pushed when entered and popped when left. On-disk creation
/// is deferred: `created` counts how many of the stacked directories actually
/// exist on disk (the root always does); `last_dir_fd` creates any still
/// missing ones on demand.
pub struct PxarDirStack {
    dirs: Vec<PxarDir>,
    // current path (for error messages), kept in sync with `dirs`
    path: PathBuf,
    // invariant: dirs[0..created] have been created/opened on disk
    created: usize,
}

impl PxarDirStack {
    /// Start a stack at an already open root directory.
    pub fn new(root: Dir, metadata: Metadata) -> Self {
        Self {
            dirs: vec![PxarDir::with_dir(root, metadata)],
            path: PathBuf::from("/"),
            created: 1, // the root directory exists
        }
    }

    /// True once even the root has been popped.
    pub fn is_empty(&self) -> bool {
        self.dirs.is_empty()
    }

    /// Enter a sub-directory named `file_name` (must be a single path
    /// component); creation on disk is deferred until `last_dir_fd`.
    pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
        assert_single_path_component(&file_name)?;
        self.path.push(&file_name);
        self.dirs.push(PxarDir::new(file_name, metadata));
        Ok(())
    }

    /// Leave the current directory, returning its tracking entry (if any).
    pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
        let out = self.dirs.pop();
        if !self.path.pop() {
            if self.path.as_os_str() == "/" {
                // we just finished the root directory, make sure this can only happen once:
                self.path = PathBuf::new();
            } else {
                bail!("lost track of path");
            }
        }
        // clamp `created` in case we popped a directory that was never created
        self.created = self.created.min(self.dirs.len());
        Ok(out)
    }

    /// Get an fd for the directory on top of the stack, first creating it and
    /// any not-yet-created ancestors on disk (top-down, each relative to its
    /// parent's fd).
    pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<BorrowedFd, Error> {
        // should not be possible given the way we use it:
        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");

        let dirs_len = self.dirs.len();
        // fd of the deepest directory that already exists on disk
        let mut fd = self.dirs[self.created - 1]
            .try_as_borrowed_fd()
            .ok_or_else(|| format_err!("lost track of directory file descriptors"))?
            .as_raw_fd();

        while self.created < dirs_len {
            fd = self.dirs[self.created]
                .create_dir(fd, allow_existing_dirs)?
                .as_raw_fd();
            self.created += 1;
        }

        self.dirs[self.created - 1]
            .try_as_borrowed_fd()
            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
    }

    /// Make sure the directory on top of the stack exists on disk.
    pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
        let _: BorrowedFd = self.last_dir_fd(allow_existing_dirs)?;
        Ok(())
    }

    /// Fd of the extraction root directory (always created).
    pub fn root_dir_fd(&self) -> Result<BorrowedFd, Error> {
        // should not be possible given the way we use it:
        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");

        self.dirs[0]
            .try_as_borrowed_fd()
            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
    }

    /// Current path relative to the extraction root (for error messages).
    pub fn path(&self) -> &Path {
        &self.path
    }
}
|
864
pbs-client/src/pxar/extract.rs
Normal file
864
pbs-client/src/pxar/extract.rs
Normal file
@ -0,0 +1,864 @@
|
||||
//! Code for extraction of pxar contents onto the file system.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CStr, CString, OsStr, OsString};
|
||||
use std::io;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::pin::Pin;
|
||||
|
||||
use futures::future::Future;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchList, MatchType};
|
||||
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
|
||||
use pxar::decoder::aio::Decoder;
|
||||
use pxar::format::Device;
|
||||
use pxar::{Entry, EntryKind, Metadata};
|
||||
|
||||
use proxmox::c_result;
|
||||
use proxmox::tools::{
|
||||
fs::{create_path, CreateOptions},
|
||||
io::{sparse_copy, sparse_copy_async},
|
||||
};
|
||||
|
||||
use pbs_tools::zip::{ZipEncoder, ZipEntry};
|
||||
|
||||
use crate::pxar::dir_stack::PxarDirStack;
|
||||
use crate::pxar::metadata;
|
||||
use crate::pxar::Flags;
|
||||
|
||||
/// Options controlling [`extract_archive`].
pub struct PxarExtractOptions<'a> {
    /// Match patterns deciding which archive entries are extracted.
    pub match_list: &'a[MatchEntry],
    /// Whether entries not covered by `match_list` are extracted by default.
    pub extract_match_default: bool,
    /// Do not fail when a directory to be created already exists.
    pub allow_existing_dirs: bool,
    /// Optional per-entry error callback; see [`ErrorHandler`].
    pub on_error: Option<ErrorHandler>,
}

/// Callback invoked on extraction errors; return `Ok(())` to continue
/// extracting or pass the error back as `Err` to abort.
pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
|
||||
|
||||
/// Extract a (sequentially decoded) pxar archive to `destination`.
///
/// `callback` is invoked with the path of every entry that gets processed;
/// `options.match_list` together with `options.extract_match_default` decide
/// which entries are actually extracted. The destination directory is created
/// with mode 0700 if missing. Errors are fatal unless `options.on_error`
/// swallows them.
pub fn extract_archive<T, F>(
    mut decoder: pxar::decoder::Decoder<T>,
    destination: &Path,
    feature_flags: Flags,
    mut callback: F,
    options: PxarExtractOptions,
) -> Result<(), Error>
where
    T: pxar::decoder::SeqRead,
    F: FnMut(&Path),
{
    // we use this to keep track of our directory-traversal
    decoder.enable_goodbye_entries(true);

    let root = decoder
        .next()
        .ok_or_else(|| format_err!("found empty pxar archive"))?
        .map_err(|err| format_err!("error reading pxar archive: {}", err))?;

    if !root.is_dir() {
        bail!("pxar archive does not start with a directory entry!");
    }

    create_path(
        &destination,
        None,
        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
    )
    .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;

    let dir = Dir::open(
        destination,
        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
        Mode::empty(),
    )
    .map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;

    let mut extractor = Extractor::new(
        dir,
        root.metadata().clone(),
        options.allow_existing_dirs,
        feature_flags,
    );

    if let Some(on_error) = options.on_error {
        extractor.on_error(on_error);
    }

    // matching state per directory level, restored on GoodbyeTable entries
    let mut match_stack = Vec::new();
    let mut err_path_stack = vec![OsString::from("/")];
    let mut current_match = options.extract_match_default;
    while let Some(entry) = decoder.next() {
        let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;

        let file_name_os = entry.file_name();

        // safety check: a file entry in an archive must never contain slashes:
        if file_name_os.as_bytes().contains(&b'/') {
            bail!("archive file entry contains slashes, which is invalid and a security concern");
        }

        let file_name = CString::new(file_name_os.as_bytes())
            .map_err(|_| format_err!("encountered file name with null-bytes"))?;

        let metadata = entry.metadata();

        extractor.set_path(entry.path().as_os_str().to_owned());

        let match_result = options.match_list.matches(
            entry.path().as_os_str().as_bytes(),
            Some(metadata.file_type() as u32),
        );

        let did_match = match match_result {
            Some(MatchType::Include) => true,
            Some(MatchType::Exclude) => false,
            None => current_match,
        };
        match (did_match, entry.kind()) {
            (_, EntryKind::Directory) => {
                callback(entry.path());

                // only create the directory immediately if it is not excluded;
                // otherwise creation is deferred until it has matching content
                let create = current_match && match_result != Some(MatchType::Exclude);
                extractor
                    .enter_directory(file_name_os.to_owned(), metadata.clone(), create)
                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;

                // We're starting a new directory, push our old matching state and replace it with
                // our new one:
                match_stack.push(current_match);
                current_match = did_match;

                // When we hit the goodbye table we'll try to apply metadata to the directory, but
                // the Goodbye entry will not contain the path, so push it to our path stack for
                // error messages:
                err_path_stack.push(extractor.clone_path());

                Ok(())
            }
            (_, EntryKind::GoodbyeTable) => {
                // go up a directory

                extractor.set_path(err_path_stack.pop().ok_or_else(|| {
                    format_err!(
                        "error at entry {:?}: unexpected end of directory",
                        file_name_os
                    )
                })?);

                extractor
                    .leave_directory()
                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;

                // We left a directory, also get back our previous matching state. This is in sync
                // with `dir_stack` so this should never be empty except for the final goodbye
                // table, in which case we get back to the default of `true`.
                current_match = match_stack.pop().unwrap_or(true);

                Ok(())
            }
            (true, EntryKind::Symlink(link)) => {
                callback(entry.path());
                extractor.extract_symlink(&file_name, metadata, link.as_ref())
            }
            (true, EntryKind::Hardlink(link)) => {
                callback(entry.path());
                extractor.extract_hardlink(&file_name, link.as_os_str())
            }
            (true, EntryKind::Device(dev)) => {
                // device nodes are only restored when the feature flag allows it
                if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
                    callback(entry.path());
                    extractor.extract_device(&file_name, metadata, dev)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::Fifo) => {
                if extractor.contains_flags(Flags::WITH_FIFOS) {
                    callback(entry.path());
                    extractor.extract_special(&file_name, metadata, 0)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::Socket) => {
                if extractor.contains_flags(Flags::WITH_SOCKETS) {
                    callback(entry.path());
                    extractor.extract_special(&file_name, metadata, 0)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::File { size, .. }) => extractor.extract_file(
                &file_name,
                metadata,
                *size,
                &mut decoder.contents().ok_or_else(|| {
                    format_err!("found regular file entry without contents in archive")
                })?,
            ),
            (false, _) => Ok(()), // skip this
        }
        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
    }

    // a well-formed archive closes every directory it opened
    if !extractor.dir_stack.is_empty() {
        bail!("unexpected eof while decoding pxar archive");
    }

    Ok(())
}
|
||||
|
||||
/// Common state for file extraction.
pub struct Extractor {
    // which metadata/entry classes to restore (xattrs, device nodes, ...)
    feature_flags: Flags,
    // passed through to the directory stack when creating directories
    allow_existing_dirs: bool,
    // directories entered so far; also provides parent fds for *at() syscalls
    dir_stack: PxarDirStack,

    /// For better error output we need to track the current path in the Extractor state.
    current_path: Arc<Mutex<OsString>>,

    /// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
    /// continue extracting or the passed error as `Err` to bail out.
    on_error: ErrorHandler,
}

impl Extractor {
    /// Create a new extractor state for a target directory.
    pub fn new(
        root_dir: Dir,
        metadata: Metadata,
        allow_existing_dirs: bool,
        feature_flags: Flags,
    ) -> Self {
        Self {
            dir_stack: PxarDirStack::new(root_dir, metadata),
            allow_existing_dirs,
            feature_flags,
            current_path: Arc::new(Mutex::new(OsString::new())),
            // default: treat every error as fatal (pass it straight through)
            on_error: Box::new(Err),
        }
    }

    /// We call this on errors. The error will be reformatted to include `current_path`. The
    /// callback should decide whether this error was fatal (simply return it) to bail out early,
    /// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
    /// extracting.
    pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
        // capture the shared path so the handler can include it in messages
        let path = Arc::clone(&self.current_path);
        self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
            on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
        });
    }

    /// Update the path used for error reporting.
    pub fn set_path(&mut self, path: OsString) {
        *self.current_path.lock().unwrap() = path;
    }

    /// Current error-reporting path (cloned out of the shared state).
    pub fn clone_path(&self) -> OsString {
        self.current_path.lock().unwrap().clone()
    }

    /// When encountering a directory during extraction, this is used to keep track of it. If
    /// `create` is true it is immediately created and its metadata will be updated once we leave
    /// it. If `create` is false it will only be created if it is going to have any actual content.
    pub fn enter_directory(
        &mut self,
        file_name: OsString,
        metadata: Metadata,
        create: bool,
    ) -> Result<(), Error> {
        self.dir_stack.push(file_name, metadata)?;

        if create {
            self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
        }

        Ok(())
    }

    /// When done with a directory we can apply its metadata if it has been created.
    pub fn leave_directory(&mut self) -> Result<(), Error> {
        let path_info = self.dir_stack.path().to_owned();

        let dir = self
            .dir_stack
            .pop()
            .map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
            .ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;

        // only apply metadata if the directory was actually created on disk
        if let Some(fd) = dir.try_as_borrowed_fd() {
            metadata::apply(
                self.feature_flags,
                dir.metadata(),
                fd.as_raw_fd(),
                &path_info,
                &mut self.on_error,
            )
            .map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
        }

        Ok(())
    }

    /// Whether the given feature flag(s) are enabled for this extraction.
    fn contains_flags(&self, flag: Flags) -> bool {
        self.feature_flags.contains(flag)
    }

    /// Raw fd of the current directory, creating it on disk if necessary.
    fn parent_fd(&mut self) -> Result<RawFd, Error> {
        self.dir_stack
            .last_dir_fd(self.allow_existing_dirs)
            .map(|d| d.as_raw_fd())
            .map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
    }

    /// Create a symlink named `file_name` pointing at `link` in the current
    /// directory and apply its metadata.
    pub fn extract_symlink(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        link: &OsStr,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        nix::unistd::symlinkat(link, Some(parent), file_name)?;
        metadata::apply_at(
            self.feature_flags,
            metadata,
            parent,
            file_name,
            self.dir_stack.path(),
            &mut self.on_error,
        )
    }

    /// Create a hard link named `file_name` to `link`, which is resolved
    /// relative to the extraction root (must be a relative path).
    pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
        crate::pxar::tools::assert_relative_path(link)?;

        let parent = self.parent_fd()?;
        let root = self.dir_stack.root_dir_fd()?;
        let target = CString::new(link.as_bytes())?;
        nix::unistd::linkat(
            Some(root.as_raw_fd()),
            target.as_c_str(),
            Some(parent),
            file_name,
            nix::unistd::LinkatFlags::NoSymlinkFollow,
        )?;

        Ok(())
    }

    /// Create a block/char device node; thin wrapper over `extract_special`.
    pub fn extract_device(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        device: &Device,
    ) -> Result<(), Error> {
        self.extract_special(file_name, metadata, device.to_dev_t())
    }

    /// Create a special file (device node, fifo or socket) via `mknodat` and
    /// apply its metadata. For fifos/sockets, `device` is 0.
    pub fn extract_special(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        device: libc::dev_t,
    ) -> Result<(), Error> {
        let mode = metadata.stat.mode;
        // archive stores the mode wider than mknodat accepts; reject overflow
        let mode = u32::try_from(mode).map_err(|_| {
            format_err!(
                "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
                mode,
                mode,
            )
        })?;
        let parent = self.parent_fd()?;
        unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
            .map_err(|err| format_err!("failed to create device node: {}", err))?;

        metadata::apply_at(
            self.feature_flags,
            metadata,
            parent,
            file_name,
            self.dir_stack.path(),
            &mut self.on_error,
        )
    }

    /// Extract a regular file of `size` bytes from `contents` (blocking),
    /// preserving sparseness, then apply its metadata.
    pub fn extract_file(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        size: u64,
        contents: &mut dyn io::Read,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        // SAFETY-ish: openat returns a fresh owned fd which File takes over
        let mut file = unsafe {
            std::fs::File::from_raw_fd(
                nix::fcntl::openat(
                    parent,
                    file_name,
                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                    Mode::from_bits(0o600).unwrap(),
                )
                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
            )
        };

        // some flags (e.g. compression related) must be set before writing data
        metadata::apply_initial_flags(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            &mut self.on_error,
        )
        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;

        let result = sparse_copy(&mut *contents, &mut file)
            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;

        if size != result.written {
            bail!(
                "extracted {} bytes of a file of {} bytes",
                result.written,
                size
            );
        }

        if result.seeked_last {
            // the copy ended in a hole; materialize the file size,
            // retrying ftruncate on EINTR
            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
                Ok(_) => false,
                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
                Err(err) => bail!("error setting file size: {}", err),
            } {}
        }

        metadata::apply(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            self.dir_stack.path(),
            &mut self.on_error,
        )
    }

    /// Async variant of [`extract_file`](Extractor::extract_file) reading the
    /// contents from an `AsyncRead` source.
    pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        size: u64,
        contents: &mut T,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        let mut file = tokio::fs::File::from_std(unsafe {
            std::fs::File::from_raw_fd(
                nix::fcntl::openat(
                    parent,
                    file_name,
                    OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                    Mode::from_bits(0o600).unwrap(),
                )
                .map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
            )
        });

        // some flags (e.g. compression related) must be set before writing data
        metadata::apply_initial_flags(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            &mut self.on_error,
        )
        .map_err(|err| format_err!("failed to apply initial flags: {}", err))?;

        let result = sparse_copy_async(&mut *contents, &mut file)
            .await
            .map_err(|err| format_err!("failed to copy file contents: {}", err))?;

        if size != result.written {
            bail!(
                "extracted {} bytes of a file of {} bytes",
                result.written,
                size
            );
        }

        if result.seeked_last {
            // the copy ended in a hole; materialize the file size,
            // retrying ftruncate on EINTR
            while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
                Ok(_) => false,
                Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
                Err(err) => bail!("error setting file size: {}", err),
            } {}
        }

        metadata::apply(
            self.feature_flags,
            metadata,
            file.as_raw_fd(),
            self.dir_stack.path(),
            &mut self.on_error,
        )
    }
}
|
||||
|
||||
/// Stream the pxar sub-tree at `path` as a zip archive into `output`.
///
/// `path` is looked up relative to the archive root; its parent path becomes
/// the prefix stripped from all zip entry names.
pub async fn create_zip<T, W, P>(
    output: W,
    decoder: Accessor<T>,
    path: P,
    verbose: bool,
) -> Result<(), Error>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
    P: AsRef<Path>,
{
    let root = decoder.open_root().await?;
    let file = root
        .lookup(&path).await?
        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;

    // prefix = everything up to (but excluding) the entry's own name;
    // it gets stripped from all paths inside the zip
    let mut prefix = PathBuf::new();
    let mut components = file.entry().path().components();
    components.next_back(); // discard the last component (the entry itself)
    for comp in components {
        prefix.push(comp);
    }

    let mut zipencoder = ZipEncoder::new(output);
    let mut decoder = decoder;
    recurse_files_zip(&mut zipencoder, &mut decoder, &prefix, file, verbose)
        .await
        .map_err(|err| {
            eprintln!("error during creating of zip: {}", err);
            err
        })?;

    // write central directory & finalize the zip stream
    zipencoder
        .finish()
        .await
        .map_err(|err| {
            eprintln!("error during finishing of zip: {}", err);
            err
        })
}
|
||||
|
||||
/// Recursively add `file` (and, for directories, everything below it) to the
/// zip encoder, with `prefix` stripped from entry paths.
///
/// Returns a boxed future because async recursion needs an indirection.
/// Symlinks, devices, fifos and sockets are skipped (zip cannot represent
/// them); hardlinks are resolved and stored as regular files.
fn recurse_files_zip<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
    verbose: bool,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                if verbose {
                    eprintln!("adding '{}' to zip", path.display());
                }
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // store the link target's contents as a regular file
                let realfile = decoder.follow_hardlink(&file).await?;
                if verbose {
                    eprintln!("adding '{}' to zip", path.display());
                }
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                if verbose {
                    eprintln!("adding '{}' to zip", path.display());
                }
                // directory entry itself (no contents)
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files_zip(zip, decoder, prefix, entry, verbose).await?;
                }
            }
            _ => {} // ignore all else
        };

        Ok(())
    })
}
|
||||
|
||||
fn get_extractor<DEST>(destination: DEST, metadata: Metadata) -> Result<Extractor, Error>
|
||||
where
|
||||
DEST: AsRef<Path>,
|
||||
{
|
||||
create_path(
|
||||
&destination,
|
||||
None,
|
||||
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
|
||||
)
|
||||
.map_err(|err| {
|
||||
format_err!(
|
||||
"error creating directory {:?}: {}",
|
||||
destination.as_ref(),
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
let dir = Dir::open(
|
||||
destination.as_ref(),
|
||||
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
|
||||
Mode::empty(),
|
||||
)
|
||||
.map_err(|err| {
|
||||
format_err!(
|
||||
"unable to open target directory {:?}: {}",
|
||||
destination.as_ref(),
|
||||
err,
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(Extractor::new(dir, metadata, false, Flags::DEFAULT))
|
||||
}
|
||||
|
||||
/// Extract the sub-tree at `path` from a random-access pxar accessor into
/// `destination`, using the archive root's metadata for the destination
/// directory itself.
pub async fn extract_sub_dir<T, DEST, PATH>(
    destination: DEST,
    decoder: Accessor<T>,
    path: PATH,
    verbose: bool,
) -> Result<(), Error>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    DEST: AsRef<Path>,
    PATH: AsRef<Path>,
{
    let root = decoder.open_root().await?;

    // destination directory gets the root entry's metadata
    let mut extractor = get_extractor(
        destination,
        root.lookup_self().await?.entry().metadata().clone(),
    )?;

    let file = root
        .lookup(&path)
        .await?
        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;

    recurse_files_extractor(&mut extractor, file, verbose).await
}
|
||||
|
||||
/// Extract an archive from a sequential (async) decoder into `destination`.
///
/// Extraction errors after the root entry are printed to stderr but do not
/// fail the call (best-effort extraction).
pub async fn extract_sub_dir_seq<S, DEST>(
    destination: DEST,
    mut decoder: Decoder<S>,
    verbose: bool,
) -> Result<(), Error>
where
    S: pxar::decoder::SeqRead + Unpin + Send + 'static,
    DEST: AsRef<Path>,
{
    // goodbye entries are needed to track directory boundaries
    decoder.enable_goodbye_entries(true);
    let root = match decoder.next().await {
        Some(Ok(root)) => root,
        Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
        None => bail!("cannot extract empty archive"),
    };

    let mut extractor = get_extractor(destination, root.metadata().clone())?;

    // deliberately best-effort: report but do not propagate extraction errors
    if let Err(err) = seq_files_extractor(&mut extractor, decoder, verbose).await {
        eprintln!("error extracting pxar archive: {}", err);
    }

    Ok(())
}
|
||||
|
||||
/// Extract a non-regular, non-directory entry (symlink, hardlink, device,
/// fifo or socket) via the matching `Extractor` method.
///
/// Device/fifo/socket entries are silently skipped when the corresponding
/// feature flag is not enabled; any other entry kind is an error here.
fn extract_special(
    extractor: &mut Extractor,
    entry: &Entry,
    file_name: &CStr,
) -> Result<(), Error> {
    let metadata = entry.metadata();
    match entry.kind() {
        EntryKind::Symlink(link) => {
            extractor.extract_symlink(file_name, metadata, link.as_ref())?;
        }
        EntryKind::Hardlink(link) => {
            extractor.extract_hardlink(file_name, link.as_os_str())?;
        }
        EntryKind::Device(dev) => {
            if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
                extractor.extract_device(file_name, metadata, dev)?;
            }
        }
        EntryKind::Fifo => {
            if extractor.contains_flags(Flags::WITH_FIFOS) {
                extractor.extract_special(file_name, metadata, 0)?;
            }
        }
        EntryKind::Socket => {
            if extractor.contains_flags(Flags::WITH_SOCKETS) {
                extractor.extract_special(file_name, metadata, 0)?;
            }
        }
        _ => bail!("extract_special used with unsupported entry kind"),
    }
    Ok(())
}
|
||||
|
||||
fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
|
||||
let file_name_os = entry.file_name().to_owned();
|
||||
|
||||
// safety check: a file entry in an archive must never contain slashes:
|
||||
if file_name_os.as_bytes().contains(&b'/') {
|
||||
bail!("archive file entry contains slashes, which is invalid and a security concern");
|
||||
}
|
||||
|
||||
let file_name = CString::new(file_name_os.as_bytes())
|
||||
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
|
||||
|
||||
Ok((file_name_os, file_name))
|
||||
}
|
||||
|
||||
/// Recursively extract `file` (random-access entry) with the given extractor.
///
/// Directories are re-decoded as a sequential stream and handed off to
/// [`seq_files_extractor`]; goodbye tables are ignored at this level.
async fn recurse_files_extractor<'a, T>(
    extractor: &'a mut Extractor,
    file: FileEntry<T>,
    verbose: bool,
) -> Result<(), Error>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
{
    let entry = file.entry();
    let metadata = entry.metadata();
    let (file_name_os, file_name) = get_filename(entry)?;

    if verbose {
        eprintln!("extracting: {}", file.path().display());
    }

    match file.kind() {
        EntryKind::Directory => {
            extractor
                .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
                .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;

            // decode the directory's contents as a sequential sub-stream
            let dir = file.enter_directory().await?;
            let mut seq_decoder = dir.decode_full().await?;
            seq_decoder.enable_goodbye_entries(true);
            seq_files_extractor(extractor, seq_decoder, verbose).await?;
            extractor.leave_directory()?;
        }
        EntryKind::File { size, .. } => {
            extractor
                .async_extract_file(
                    &file_name,
                    metadata,
                    *size,
                    &mut file.contents().await.map_err(|_| {
                        format_err!("found regular file entry without contents in archive")
                    })?,
                )
                .await?
        }
        EntryKind::GoodbyeTable => {} // ignore
        _ => extract_special(extractor, entry, &file_name)?,
    }
    Ok(())
}
|
||||
|
||||
/// Extract all entries from a sequential decoder, best-effort.
///
/// `dir_level` tracks Directory vs. GoodbyeTable entries; per-entry errors are
/// printed but do not abort the loop. Returns early once we leave the
/// directory we started in (one more GoodbyeTable than Directory).
async fn seq_files_extractor<'a, T>(
    extractor: &'a mut Extractor,
    mut decoder: pxar::decoder::aio::Decoder<T>,
    verbose: bool,
) -> Result<(), Error>
where
    T: pxar::decoder::SeqRead,
{
    let mut dir_level = 0;
    loop {
        let entry = match decoder.next().await {
            Some(entry) => entry?,
            None => return Ok(()),
        };

        let metadata = entry.metadata();
        let (file_name_os, file_name) = get_filename(&entry)?;

        if verbose && !matches!(entry.kind(), EntryKind::GoodbyeTable) {
            eprintln!("extracting: {}", entry.path().display());
        }

        // process one entry; errors are reported below but not fatal
        if let Err(err) = async {
            match entry.kind() {
                EntryKind::Directory => {
                    dir_level += 1;
                    extractor
                        .enter_directory(file_name_os.to_owned(), metadata.clone(), true)
                        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
                }
                EntryKind::File { size, .. } => {
                    extractor
                        .async_extract_file(
                            &file_name,
                            metadata,
                            *size,
                            &mut decoder.contents().ok_or_else(|| {
                                format_err!("found regular file entry without contents in archive")
                            })?,
                        )
                        .await?
                }
                EntryKind::GoodbyeTable => {
                    dir_level -= 1;
                    extractor.leave_directory()?;
                }
                _ => extract_special(extractor, &entry, &file_name)?,
            }
            Ok(()) as Result<(), Error>
        }
        .await
        {
            let display = entry.path().display().to_string();
            eprintln!(
                "error extracting {}: {}",
                // goodbye entries carry no meaningful path
                if matches!(entry.kind(), EntryKind::GoodbyeTable) {
                    "<directory>"
                } else {
                    &display
                },
                err
            );
        }

        if dir_level < 0 {
            // we've encountered one Goodbye more then Directory, meaning we've left the dir we
            // started in - exit early, otherwise the extractor might panic
            return Ok(());
        }
    }
}
|
378
pbs-client/src/pxar/flags.rs
Normal file
378
pbs-client/src/pxar/flags.rs
Normal file
@ -0,0 +1,378 @@
|
||||
//! Feature flags for *pxar* allow to control what is stored/restored in/from the
|
||||
//! archive.
|
||||
//! Flags for known supported features for a given filesystem can be derived
|
||||
//! from the superblocks magic number.
|
||||
|
||||
use libc::c_long;
|
||||
|
||||
use bitflags::bitflags;
|
||||
|
||||
bitflags! {
    /// Feature flags controlling what is stored in / restored from a *pxar* archive.
    pub struct Flags: u64 {
        /// FAT-style 2s time granularity
        const WITH_2SEC_TIME = 0x40;
        /// Preserve read only flag of files
        const WITH_READ_ONLY = 0x80;
        /// Preserve unix permissions
        const WITH_PERMISSIONS = 0x100;
        /// Include symbolic links
        const WITH_SYMLINKS = 0x200;
        /// Include device nodes
        const WITH_DEVICE_NODES = 0x400;
        /// Include FIFOs
        const WITH_FIFOS = 0x800;
        /// Include Sockets
        const WITH_SOCKETS = 0x1000;

        /// Preserve DOS file flag `HIDDEN`
        const WITH_FLAG_HIDDEN = 0x2000;
        /// Preserve DOS file flag `SYSTEM`
        const WITH_FLAG_SYSTEM = 0x4000;
        /// Preserve DOS file flag `ARCHIVE`
        const WITH_FLAG_ARCHIVE = 0x8000;

        // chattr() flags
        /// Linux file attribute `APPEND`
        const WITH_FLAG_APPEND = 0x10000;
        /// Linux file attribute `NOATIME`
        const WITH_FLAG_NOATIME = 0x20000;
        /// Linux file attribute `COMPR`
        const WITH_FLAG_COMPR = 0x40000;
        /// Linux file attribute `NOCOW`
        const WITH_FLAG_NOCOW = 0x80000;
        /// Linux file attribute `NODUMP`
        const WITH_FLAG_NODUMP = 0x0010_0000;
        /// Linux file attribute `DIRSYNC`
        const WITH_FLAG_DIRSYNC = 0x0020_0000;
        /// Linux file attribute `IMMUTABLE`
        const WITH_FLAG_IMMUTABLE = 0x0040_0000;
        /// Linux file attribute `SYNC`
        const WITH_FLAG_SYNC = 0x0080_0000;
        /// Linux file attribute `NOCOMP`
        const WITH_FLAG_NOCOMP = 0x0100_0000;
        /// Linux file attribute `PROJINHERIT`
        const WITH_FLAG_PROJINHERIT = 0x0200_0000;


        /// Preserve BTRFS subvolume flag
        const WITH_SUBVOLUME = 0x0400_0000;
        /// Preserve BTRFS read-only subvolume flag
        const WITH_SUBVOLUME_RO = 0x0800_0000;

        /// Preserve Extended Attribute metadata
        const WITH_XATTRS = 0x1000_0000;
        /// Preserve Access Control List metadata
        const WITH_ACL = 0x2000_0000;
        /// Preserve SELinux security context
        const WITH_SELINUX = 0x4000_0000;
        /// Preserve "security.capability" xattr
        const WITH_FCAPS = 0x8000_0000;

        /// Preserve XFS/ext4/ZFS project quota ID
        const WITH_QUOTA_PROJID = 0x0001_0000_0000;

        /// Support ".pxarexclude" files
        const EXCLUDE_FILE = 0x1000_0000_0000_0000;
        /// Exclude submounts
        const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
        /// Exclude entries with chattr flag NODUMP
        const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;

        // Definitions of typical feature flags for the *pxar* encoder/decoder.
        // This avoids expensive syscalls for features a filesystem does not support.

        /// All chattr file attributes
        const WITH_CHATTR =
            Flags::WITH_FLAG_APPEND.bits() |
            Flags::WITH_FLAG_NOATIME.bits() |
            Flags::WITH_FLAG_COMPR.bits() |
            Flags::WITH_FLAG_NOCOW.bits() |
            Flags::WITH_FLAG_NODUMP.bits() |
            Flags::WITH_FLAG_DIRSYNC.bits() |
            Flags::WITH_FLAG_IMMUTABLE.bits() |
            Flags::WITH_FLAG_SYNC.bits() |
            Flags::WITH_FLAG_NOCOMP.bits() |
            Flags::WITH_FLAG_PROJINHERIT.bits();

        /// All FAT file attributes
        const WITH_FAT_ATTRS =
            Flags::WITH_FLAG_HIDDEN.bits() |
            Flags::WITH_FLAG_SYSTEM.bits() |
            Flags::WITH_FLAG_ARCHIVE.bits();

        /// All bits that may also be exposed via fuse
        const WITH_FUSE =
            Flags::WITH_2SEC_TIME.bits() |
            Flags::WITH_READ_ONLY.bits() |
            Flags::WITH_PERMISSIONS.bits() |
            Flags::WITH_SYMLINKS.bits() |
            Flags::WITH_DEVICE_NODES.bits() |
            Flags::WITH_FIFOS.bits() |
            Flags::WITH_SOCKETS.bits() |
            Flags::WITH_FAT_ATTRS.bits() |
            Flags::WITH_CHATTR.bits() |
            Flags::WITH_XATTRS.bits();


        /// Default feature flags for encoder/decoder
        const DEFAULT =
            Flags::WITH_SYMLINKS.bits() |
            Flags::WITH_DEVICE_NODES.bits() |
            Flags::WITH_FIFOS.bits() |
            Flags::WITH_SOCKETS.bits() |
            Flags::WITH_FLAG_HIDDEN.bits() |
            Flags::WITH_FLAG_SYSTEM.bits() |
            Flags::WITH_FLAG_ARCHIVE.bits() |
            Flags::WITH_FLAG_APPEND.bits() |
            Flags::WITH_FLAG_NOATIME.bits() |
            Flags::WITH_FLAG_COMPR.bits() |
            Flags::WITH_FLAG_NOCOW.bits() |
            // NODUMP is deliberately not set by default:
            //WITH_FLAG_NODUMP.bits() |
            Flags::WITH_FLAG_DIRSYNC.bits() |
            Flags::WITH_FLAG_IMMUTABLE.bits() |
            Flags::WITH_FLAG_SYNC.bits() |
            Flags::WITH_FLAG_NOCOMP.bits() |
            Flags::WITH_FLAG_PROJINHERIT.bits() |
            Flags::WITH_SUBVOLUME.bits() |
            Flags::WITH_SUBVOLUME_RO.bits() |
            Flags::WITH_XATTRS.bits() |
            Flags::WITH_ACL.bits() |
            Flags::WITH_SELINUX.bits() |
            Flags::WITH_FCAPS.bits() |
            Flags::WITH_QUOTA_PROJID.bits() |
            Flags::EXCLUDE_NODUMP.bits() |
            Flags::EXCLUDE_FILE.bits();
    }
}
|
||||
|
||||
impl Default for Flags {
|
||||
fn default() -> Flags {
|
||||
Flags::DEFAULT
|
||||
}
|
||||
}
|
||||
|
||||
// from /usr/include/linux/fs.h
const FS_APPEND_FL: c_long = 0x0000_0020;
const FS_NOATIME_FL: c_long = 0x0000_0080;
const FS_COMPR_FL: c_long = 0x0000_0004;
const FS_NOCOW_FL: c_long = 0x0080_0000;
const FS_NODUMP_FL: c_long = 0x0000_0040;
const FS_DIRSYNC_FL: c_long = 0x0001_0000;
const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
const FS_SYNC_FL: c_long = 0x0000_0008;
const FS_NOCOMP_FL: c_long = 0x0000_0400;
const FS_PROJINHERIT_FL: c_long = 0x2000_0000;

// Subset of chattr flags selected by `Flags::to_initial_chattr`.
// NOTE(review): presumably these must be applied to a newly created file
// before its contents are written (compression/COW behavior) — confirm
// against the extractor code that uses this.
pub(crate) const INITIAL_FS_FLAGS: c_long =
    FS_NOATIME_FL
    | FS_COMPR_FL
    | FS_NOCOW_FL
    | FS_NOCOMP_FL
    | FS_PROJINHERIT_FL;
|
||||
|
||||
// Maps each pxar chattr feature flag to its corresponding linux `FS_*_FL` bit,
// used by `Flags::from_chattr` / `Flags::to_chattr` below.
#[rustfmt::skip]
const CHATTR_MAP: [(Flags, c_long); 10] = [
    ( Flags::WITH_FLAG_APPEND,      FS_APPEND_FL      ),
    ( Flags::WITH_FLAG_NOATIME,     FS_NOATIME_FL     ),
    ( Flags::WITH_FLAG_COMPR,       FS_COMPR_FL       ),
    ( Flags::WITH_FLAG_NOCOW,       FS_NOCOW_FL       ),
    ( Flags::WITH_FLAG_NODUMP,      FS_NODUMP_FL      ),
    ( Flags::WITH_FLAG_DIRSYNC,     FS_DIRSYNC_FL     ),
    ( Flags::WITH_FLAG_IMMUTABLE,   FS_IMMUTABLE_FL   ),
    ( Flags::WITH_FLAG_SYNC,        FS_SYNC_FL        ),
    ( Flags::WITH_FLAG_NOCOMP,      FS_NOCOMP_FL      ),
    ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];
|
||||
|
||||
// from /usr/include/linux/msdos_fs.h
const ATTR_HIDDEN: u32 = 2;
const ATTR_SYS: u32 = 4;
const ATTR_ARCH: u32 = 32;

// Maps each pxar DOS-attribute feature flag to its FAT attribute bit,
// used by `Flags::from_fat_attr` / `Flags::to_fat_attr` below.
#[rustfmt::skip]
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
    ( Flags::WITH_FLAG_HIDDEN,  ATTR_HIDDEN ),
    ( Flags::WITH_FLAG_SYSTEM,  ATTR_SYS    ),
    ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH   ),
];
|
||||
|
||||
impl Flags {
    /// Get a set of feature flags from file attributes.
    pub fn from_chattr(attr: c_long) -> Flags {
        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &CHATTR_MAP {
            if (attr & fs_flag) != 0 {
                flags |= *fe_flag;
            }
        }

        flags
    }

    /// Get the chattr bit representation of these feature flags.
    pub fn to_chattr(self) -> c_long {
        let mut flags: c_long = 0;

        for (fe_flag, fs_flag) in &CHATTR_MAP {
            if self.contains(*fe_flag) {
                flags |= *fs_flag;
            }
        }

        flags
    }

    /// Get the subset of the chattr bits contained in `INITIAL_FS_FLAGS`.
    pub fn to_initial_chattr(self) -> c_long {
        self.to_chattr() & INITIAL_FS_FLAGS
    }

    /// Get a set of feature flags from FAT attributes.
    pub fn from_fat_attr(attr: u32) -> Flags {
        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
            if (attr & fs_flag) != 0 {
                flags |= *fe_flag;
            }
        }

        flags
    }

    /// Get the fat attribute bit representation of these feature flags.
    pub fn to_fat_attr(self) -> u32 {
        let mut flags = 0u32;

        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
            if self.contains(*fe_flag) {
                flags |= *fs_flag;
            }
        }

        flags
    }

    /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
    pub fn from_magic(magic: i64) -> Flags {
        use proxmox::sys::linux::magic::*;
        match magic {
            MSDOS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_FAT_ATTRS
            },
            EXT4_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            // like ext4, but without DIRSYNC
            XFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            ZFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            // adds compression/COW/subvolume flags, but no quota project ids
            BTRFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_COMPR |
                Flags::WITH_FLAG_NOCOW |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_FLAG_NOCOMP |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_SUBVOLUME |
                Flags::WITH_SUBVOLUME_RO |
                Flags::WITH_FCAPS
            },
            TMPFS_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX
            },
            // FUSE mounts are special as the supported feature set
            // is not clear a priori.
            FUSE_SUPER_MAGIC => {
                Flags::WITH_FUSE
            },
            // conservative default for unknown filesystems
            _ => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_FCAPS
            },
        }
    }
}
|
690
pbs-client/src/pxar/fuse.rs
Normal file
690
pbs-client/src/pxar/fuse.rs
Normal file
@ -0,0 +1,690 @@
|
||||
//! Asynchronous fuse implementation.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::channel::mpsc::UnboundedSender;
|
||||
use futures::select;
|
||||
use futures::sink::SinkExt;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
|
||||
use proxmox::tools::vec;
|
||||
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
|
||||
|
||||
use proxmox_fuse::requests::{self, FuseRequest};
|
||||
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
|
||||
|
||||
use pbs_tools::xattr;
|
||||
|
||||
/// We mark inodes for regular files this way so we know how to access them.
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;

/// A directory inode is any inode without the non-directory marker bit set.
#[inline]
fn is_dir_inode(inode: u64) -> bool {
    (inode & NON_DIRECTORY_INODE) == 0
}
|
||||
|
||||
/// Our reader type instance used for accessors: a type-erased, shareable
/// `ReadAt` implementation backing the whole fuse session.
pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;

/// Our Accessor type instance.
pub type Accessor = accessor::aio::Accessor<Reader>;

/// Our Directory type instance.
pub type Directory = accessor::aio::Directory<Reader>;

/// Our FileEntry type instance.
pub type FileEntry = accessor::aio::FileEntry<Reader>;

/// Our FileContents type instance.
pub type FileContents = accessor::aio::FileContents<Reader>;
|
||||
|
||||
/// A mounted fuse session.
///
/// Wraps the boxed main-loop future; await it (via the `Future` impl below)
/// to run the session until the main loop ends or fails.
pub struct Session {
    fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
}
|
||||
|
||||
impl Session {
    /// Create a fuse session for an archive.
    ///
    /// Opens `archive_path` synchronously, wraps it in a `FileReader`-backed
    /// accessor and delegates to [`Session::mount`].
    pub async fn mount_path(
        archive_path: &Path,
        options: &OsStr,
        verbose: bool,
        mountpoint: &Path,
    ) -> Result<Self, Error> {
        // TODO: Add a buffered/caching ReadAt layer?
        let file = std::fs::File::open(archive_path)?;
        let file_size = file.metadata()?.len();
        let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
        let accessor = Accessor::new(reader, file_size).await?;
        Self::mount(accessor, options, verbose, mountpoint)
    }

    /// Create a new fuse session for the given pxar `Accessor`.
    ///
    /// Mounts a read-only view of the archive at `path`; the returned
    /// `Session` future drives the request loop.
    pub fn mount(
        accessor: Accessor,
        options: &OsStr,
        verbose: bool,
        path: &Path,
    ) -> Result<Self, Error> {
        // NOTE(review): `.debug()` is enabled unconditionally here rather than
        // being gated on `verbose` — confirm this is intended.
        let fuse = Fuse::builder("pxar-mount")?
            .debug()
            .options_os(options)?
            .enable_readdirplus()
            .enable_read()
            .enable_readlink()
            .enable_read_xattr()
            .build()?
            .mount(path)?;

        let session = SessionImpl::new(accessor, verbose);

        Ok(Self {
            fut: Box::pin(session.main(fuse)),
        })
    }
}
|
||||
|
||||
impl Future for Session {
|
||||
type Output = Result<(), Error>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
Pin::new(&mut self.fut).poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
/// We use this to return an errno value back to the kernel.
macro_rules! io_return {
    ($errno:expr) => {
        return Err(::std::io::Error::from_raw_os_error($errno).into());
    };
}

/// Format an "other" error, see `io_bail` below for details.
macro_rules! io_format_err {
    ($($fmt:tt)*) => {
        ::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
    }
}

/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
/// request to be answered with a generic `EIO` error code. The error message contained in here
/// will be printed to stderr if the verbose flag is used, otherwise silently dropped.
macro_rules! io_bail {
    ($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
}
|
||||
|
||||
/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
/// accessed.
struct Lookup {
    // Reference count of outstanding lookups; the entry is removed from the
    // session's lookup map when this reaches zero (see `LookupRef::drop`).
    refs: AtomicUsize,

    // Inode number presented to the kernel.
    inode: u64,
    // Inode of the parent directory (used for the ".." entry in readdirplus).
    parent: u64,
    // Byte range of this entry within the archive.
    entry_range_info: EntryRangeInfo,
    // Byte range of the file contents, when the entry has contents.
    content_range: Option<Range<u64>>,
}
|
||||
|
||||
impl Lookup {
    // Boxed so the address stays stable inside the session's BTreeMap —
    // `LookupRef` stores a raw pointer to it.
    fn new(
        inode: u64,
        parent: u64,
        entry_range_info: EntryRangeInfo,
        content_range: Option<Range<u64>>,
    ) -> Box<Lookup> {
        Box::new(Self {
            // starts with a single reference
            refs: AtomicUsize::new(1),
            inode,
            parent,
            entry_range_info,
            content_range,
        })
    }

    /// Decrease the reference count by `count`. Note that this must not include the reference held
    /// by `self` itself, so this must not decrease the count below 2.
    fn forget(&self, count: usize) -> Result<(), Error> {
        loop {
            let old = self.refs.load(Ordering::Acquire);
            if count >= old {
                io_bail!("reference count underflow");
            }
            let new = old - count;
            // CAS loop: retry if another thread modified the count in between.
            match self
                .refs
                .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
            {
                Ok(_) => break Ok(()),
                Err(_) => continue,
            }
        }
    }

    // Create a counted reference to this entry. Incrementing from zero would
    // mean reviving an entry that is concurrently being dropped, which must
    // never happen.
    fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
        if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
            panic!("atomic refcount increased from 0 to 1");
        }

        LookupRef {
            session,
            lookup: self as *const Lookup,
        }
    }
}
|
||||
|
||||
/// A counted reference to a cached `Lookup` entry, tied to the session whose
/// lookup map owns the entry so `Drop` can remove it when the count hits zero.
struct LookupRef<'a> {
    session: &'a SessionImpl,
    // Points into the `Box<Lookup>` stored in `session.lookups`; only null
    // after `leak()` has consumed the reference.
    lookup: *const Lookup,
}

// SAFETY: the raw pointer targets a boxed `Lookup` kept alive by the session's
// lookup map for as long as references exist (entries are only removed in
// `LookupRef::drop` once the count reaches zero), and shared access only
// touches the atomic `refs` counter and immutable fields.
unsafe impl<'a> Send for LookupRef<'a> {}
// SAFETY: see `Send` above.
unsafe impl<'a> Sync for LookupRef<'a> {}
|
||||
|
||||
impl<'a> Clone for LookupRef<'a> {
    // Cloning bumps the shared atomic reference count via `get_ref`.
    fn clone(&self) -> Self {
        self.get_ref(self.session)
    }
}
|
||||
|
||||
impl<'a> std::ops::Deref for LookupRef<'a> {
    type Target = Lookup;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `lookup` is non-null for any reachable `LookupRef` (it is
        // only nulled by `leak()`, which consumes `self`), and the session's
        // lookup map keeps the allocation alive while references exist.
        unsafe { &*self.lookup }
    }
}
|
||||
|
||||
impl<'a> Drop for LookupRef<'a> {
    fn drop(&mut self) {
        // A leaked reference (null pointer) must not touch the count.
        if self.lookup.is_null() {
            return;
        }

        // Last reference dropped: remove the entry from the session's map.
        if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
            let inode = self.inode;
            drop(self.session.lookups.write().unwrap().remove(&inode));
        }
    }
}
|
||||
|
||||
impl<'a> LookupRef<'a> {
    // Hand this reference's count over to the kernel: nulling the pointer
    // prevents `Drop` from decrementing, so the count is only given back via
    // later `Forget` requests.
    fn leak(mut self) -> &'a Lookup {
        // SAFETY: `self.lookup` is non-null here (only `leak` itself nulls
        // it), and the pointee outlives 'a as it is owned by the session.
        unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
    }
}
|
||||
|
||||
/// Shared state of a fuse session: the archive accessor plus the cache of
/// entries the kernel currently has looked up.
struct SessionImpl {
    accessor: Accessor,
    // print errors and debug information to stderr when set
    verbose: bool,
    // inode -> cached lookup entry; entries are boxed so the raw pointers
    // held by `LookupRef` remain stable across map operations
    lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
}
|
||||
|
||||
impl SessionImpl {
|
||||
fn new(accessor: Accessor, verbose: bool) -> Self {
|
||||
let root = Lookup::new(
|
||||
ROOT_ID,
|
||||
ROOT_ID,
|
||||
EntryRangeInfo::toplevel(0..accessor.size()),
|
||||
None,
|
||||
);
|
||||
|
||||
let mut tree = BTreeMap::new();
|
||||
tree.insert(ROOT_ID, root);
|
||||
|
||||
Self {
|
||||
accessor,
|
||||
verbose,
|
||||
lookups: RwLock::new(tree),
|
||||
}
|
||||
}
|
||||
|
||||
/// Here's how we deal with errors:
|
||||
///
|
||||
/// Any error will be printed if the verbose flag was set, otherwise the message will be
|
||||
/// silently dropped.
|
||||
///
|
||||
/// Opaque errors will cause the fuse main loop to bail out with that error.
|
||||
///
|
||||
/// `io::Error`s will cause the fuse request to responded to with the given `io::Error`. An
|
||||
/// `io::ErrorKind::Other` translates to a generic `EIO`.
|
||||
async fn handle_err(
|
||||
&self,
|
||||
request: impl FuseRequest,
|
||||
err: Error,
|
||||
mut sender: UnboundedSender<Error>,
|
||||
) {
|
||||
let final_result = match err.downcast::<io::Error>() {
|
||||
Ok(err) => {
|
||||
if err.kind() == io::ErrorKind::Other && self.verbose {
|
||||
eprintln!("an IO error occurred: {}", err);
|
||||
}
|
||||
|
||||
// fail the request
|
||||
request.io_fail(err).map_err(Error::from)
|
||||
}
|
||||
Err(err) => {
|
||||
// `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
|
||||
if self.verbose {
|
||||
eprintln!("internal error: {}, bailing out", err);
|
||||
}
|
||||
Err(err)
|
||||
}
|
||||
};
|
||||
if let Err(err) = final_result {
|
||||
// either we failed to send the error code to fuse, or the above was not an
|
||||
// `io::Error`, so in this case notify the main loop:
|
||||
sender
|
||||
.send(err)
|
||||
.await
|
||||
.expect("failed to propagate error to main loop");
|
||||
}
|
||||
}
|
||||
|
||||
async fn main(self, fuse: Fuse) -> Result<(), Error> {
|
||||
Arc::new(self).main_do(fuse).await
|
||||
}
|
||||
|
||||
async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
|
||||
let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
|
||||
let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
|
||||
loop {
|
||||
select! {
|
||||
request = fuse.try_next() => match request? {
|
||||
Some(request) => {
|
||||
tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
|
||||
}
|
||||
None => break,
|
||||
},
|
||||
err = err_recv.next() => match err {
|
||||
Some(err) => if self.verbose {
|
||||
eprintln!("cancelling fuse main loop due to error: {}", err);
|
||||
return Err(err);
|
||||
},
|
||||
None => panic!("error channel was closed unexpectedly"),
|
||||
},
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_request(
|
||||
self: Arc<Self>,
|
||||
request: Request,
|
||||
mut err_sender: UnboundedSender<Error>,
|
||||
) {
|
||||
let result: Result<(), Error> = match request {
|
||||
Request::Lookup(request) => {
|
||||
match self.lookup(request.parent, &request.file_name).await {
|
||||
Ok((entry, lookup)) => match request.reply(&entry) {
|
||||
Ok(()) => {
|
||||
lookup.leak();
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => Err(Error::from(err)),
|
||||
},
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
}
|
||||
}
|
||||
Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
|
||||
Ok(()) => {
|
||||
request.reply();
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::Getattr(request) => match self.getattr(request.inode).await {
|
||||
Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
|
||||
Ok(lookups) => match request.reply() {
|
||||
Ok(()) => {
|
||||
for i in lookups {
|
||||
i.leak();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => Err(Error::from(err)),
|
||||
},
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::Read(request) => {
|
||||
match self.read(request.inode, request.size, request.offset).await {
|
||||
Ok(data) => request.reply(&data).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
}
|
||||
}
|
||||
Request::Readlink(request) => match self.readlink(request.inode).await {
|
||||
Ok(data) => request.reply(&data).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
|
||||
Ok(data) => request
|
||||
.reply(
|
||||
data.into_iter()
|
||||
.fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
|
||||
)
|
||||
.map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
|
||||
Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
|
||||
Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
},
|
||||
Request::GetXAttrSize(request) => {
|
||||
match self.getxattr(request.inode, &request.attr_name).await {
|
||||
Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
}
|
||||
}
|
||||
Request::GetXAttr(request) => {
|
||||
match self.getxattr(request.inode, &request.attr_name).await {
|
||||
Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
|
||||
Err(err) => return self.handle_err(request, err, err_sender).await,
|
||||
}
|
||||
}
|
||||
other => {
|
||||
if self.verbose {
|
||||
eprintln!("Received unexpected fuse request");
|
||||
}
|
||||
other.fail(libc::ENOSYS).map_err(Error::from)
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) = result {
|
||||
err_sender
|
||||
.send(err)
|
||||
.await
|
||||
.expect("failed to propagate error to main loop");
|
||||
}
|
||||
}
|
||||
|
||||
fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
|
||||
let lookups = self.lookups.read().unwrap();
|
||||
if let Some(lookup) = lookups.get(&inode) {
|
||||
return Ok(lookup.get_ref(self));
|
||||
}
|
||||
io_return!(libc::ENOENT);
|
||||
}
|
||||
|
||||
async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
|
||||
if inode == ROOT_ID {
|
||||
Ok(self.accessor.open_root().await?)
|
||||
} else if !is_dir_inode(inode) {
|
||||
io_return!(libc::ENOTDIR);
|
||||
} else {
|
||||
Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
|
||||
}
|
||||
}
|
||||
|
||||
async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
|
||||
unsafe {
|
||||
self.accessor
|
||||
.open_file_at_range(&lookup.entry_range_info)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
|
||||
if is_dir_inode(lookup.inode) {
|
||||
io_return!(libc::EISDIR);
|
||||
}
|
||||
|
||||
match lookup.content_range.clone() {
|
||||
Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
|
||||
None => io_return!(libc::EBADF),
|
||||
}
|
||||
}
|
||||
|
||||
fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
|
||||
let lookups = self.lookups.read().unwrap();
|
||||
if let Some(lookup) = lookups.get(&inode) {
|
||||
return Ok(lookup.get_ref(self));
|
||||
}
|
||||
drop(lookups);
|
||||
|
||||
let entry = Lookup::new(
|
||||
inode,
|
||||
parent,
|
||||
entry.entry_range_info().clone(),
|
||||
entry.content_range()?,
|
||||
);
|
||||
let reference = entry.get_ref(self);
|
||||
entry.refs.store(1, Ordering::Release);
|
||||
|
||||
let mut lookups = self.lookups.write().unwrap();
|
||||
if let Some(lookup) = lookups.get(&inode) {
|
||||
return Ok(lookup.get_ref(self));
|
||||
}
|
||||
|
||||
lookups.insert(inode, entry);
|
||||
drop(lookups);
|
||||
Ok(reference)
|
||||
}
|
||||
|
||||
fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
|
||||
let node = self.get_lookup(inode)?;
|
||||
node.forget(count)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn lookup(
|
||||
&'_ self,
|
||||
parent: u64,
|
||||
file_name: &OsStr,
|
||||
) -> Result<(EntryParam, LookupRef<'_>), Error> {
|
||||
let dir = self.open_dir(parent).await?;
|
||||
|
||||
let entry = match { dir }.lookup(file_name).await? {
|
||||
Some(entry) => entry,
|
||||
None => io_return!(libc::ENOENT),
|
||||
};
|
||||
|
||||
let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
|
||||
// we don't know the file's end-offset, so we'll just allow the decoder to decode the
|
||||
// entire rest of the archive until we figure out something better...
|
||||
let entry = self.accessor.follow_hardlink(&entry).await?;
|
||||
|
||||
if let pxar::EntryKind::Hardlink(_) = entry.kind() {
|
||||
// hardlinks must not point to other hardlinks...
|
||||
io_return!(libc::ELOOP);
|
||||
}
|
||||
|
||||
entry
|
||||
} else {
|
||||
entry
|
||||
};
|
||||
|
||||
let response = to_entry(&entry)?;
|
||||
let inode = response.inode;
|
||||
Ok((response, self.make_lookup(parent, inode, &entry)?))
|
||||
}
|
||||
|
||||
async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
|
||||
let entry = unsafe {
|
||||
self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
|
||||
};
|
||||
to_stat(inode, &entry)
|
||||
}
|
||||
|
||||
async fn readdirplus(
|
||||
&'_ self,
|
||||
request: &mut requests::ReaddirPlus,
|
||||
) -> Result<Vec<LookupRef<'_>>, Error> {
|
||||
let mut lookups = Vec::new();
|
||||
let offset = usize::try_from(request.offset)
|
||||
.map_err(|_| io_format_err!("directory offset out of range"))?;
|
||||
|
||||
let dir = self.open_dir(request.inode).await?;
|
||||
let dir_lookup = self.get_lookup(request.inode)?;
|
||||
|
||||
let entry_count = dir.read_dir().count() as isize;
|
||||
|
||||
let mut next = offset as isize;
|
||||
let mut iter = dir.read_dir().skip(offset);
|
||||
while let Some(file) = iter.next().await {
|
||||
next += 1;
|
||||
let file = file?.decode_entry().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = file.file_name();
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
|
||||
}
|
||||
|
||||
if next == entry_count {
|
||||
next += 1;
|
||||
let file = dir.lookup_self().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = OsStr::new(".");
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
lookups.push(LookupRef::clone(&dir_lookup));
|
||||
}
|
||||
|
||||
if next == entry_count + 1 {
|
||||
next += 1;
|
||||
let lookup = self.get_lookup(dir_lookup.parent)?;
|
||||
let parent_dir = self.open_dir(lookup.inode).await?;
|
||||
let file = parent_dir.lookup_self().await?;
|
||||
let stat = to_stat(to_inode(&file), &file)?;
|
||||
let name = OsStr::new("..");
|
||||
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(lookups),
|
||||
}
|
||||
lookups.push(lookup);
|
||||
}
|
||||
|
||||
Ok(lookups)
|
||||
}
|
||||
|
||||
async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
|
||||
let file = self.get_lookup(inode)?;
|
||||
let content = self.open_content(&file)?;
|
||||
let mut buf = vec::undefined(len);
|
||||
let got = content.read_at(&mut buf, offset).await?;
|
||||
buf.truncate(got);
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
|
||||
let lookup = self.get_lookup(inode)?;
|
||||
let file = self.open_entry(&lookup).await?;
|
||||
match file.get_symlink() {
|
||||
None => io_return!(libc::EINVAL),
|
||||
Some(link) => Ok(link.to_owned()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
|
||||
let lookup = self.get_lookup(inode)?;
|
||||
let metadata = self
|
||||
.open_entry(&lookup)
|
||||
.await?
|
||||
.into_entry()
|
||||
.into_metadata();
|
||||
|
||||
let mut xattrs = metadata.xattrs;
|
||||
|
||||
use pxar::format::XAttr;
|
||||
|
||||
if let Some(fcaps) = metadata.fcaps {
|
||||
xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data));
|
||||
}
|
||||
|
||||
// TODO: Special cases:
|
||||
// b"system.posix_acl_access
|
||||
// b"system.posix_acl_default
|
||||
//
|
||||
// For these we need to be able to create posix acl format entries, at that point we could
|
||||
// just ditch libacl as well...
|
||||
|
||||
Ok(xattrs)
|
||||
}
|
||||
|
||||
async fn listxattrs_into(
|
||||
&self,
|
||||
request: &mut requests::ListXAttr,
|
||||
) -> Result<ReplyBufState, Error> {
|
||||
let xattrs = self.listxattrs(request.inode).await?;
|
||||
|
||||
for entry in xattrs {
|
||||
match request.add_c_string(entry.name()) {
|
||||
ReplyBufState::Ok => (),
|
||||
ReplyBufState::Full => return Ok(ReplyBufState::Full),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ReplyBufState::Ok)
|
||||
}
|
||||
|
||||
async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
|
||||
// TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
|
||||
// xattr for an entry...
|
||||
let xattrs = self.listxattrs(inode).await?;
|
||||
for entry in xattrs {
|
||||
if entry.name().to_bytes() == xattr.as_bytes() {
|
||||
return Ok(entry);
|
||||
}
|
||||
}
|
||||
io_return!(libc::ENODATA);
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
|
||||
to_entry_param(to_inode(&entry), &entry)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn to_inode(entry: &FileEntry) -> u64 {
|
||||
if entry.is_dir() {
|
||||
entry.entry_range_info().entry_range.end
|
||||
} else {
|
||||
entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
|
||||
}
|
||||
}
|
||||
|
||||
fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
|
||||
Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
|
||||
}
|
||||
|
||||
/// Translate pxar entry metadata into a `libc::stat` for fuse replies.
///
/// The archive only stores an mtime, so atime and ctime are filled with the
/// same timestamp. Directories report a link count of 2, everything else 1.
fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
    let nlink = if entry.is_dir() { 2 } else { 1 };

    let metadata = entry.metadata();

    // SAFETY: libc::stat is plain old data, the all-zero bit pattern is valid
    let mut stat: libc::stat = unsafe { mem::zeroed() };
    stat.st_ino = inode;
    stat.st_nlink = nlink;
    stat.st_mode = u32::try_from(metadata.stat.mode)
        .map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
    // entries without a payload size (dirs, devices, ...) report 0
    stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
        .map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
    stat.st_uid = metadata.stat.uid;
    stat.st_gid = metadata.stat.gid;
    stat.st_atime = metadata.stat.mtime.secs;
    stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
    stat.st_mtime = metadata.stat.mtime.secs;
    stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
    stat.st_ctime = metadata.stat.mtime.secs;
    stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
    Ok(stat)
}
|
407
pbs-client/src/pxar/metadata.rs
Normal file
407
pbs-client/src/pxar/metadata.rs
Normal file
@ -0,0 +1,407 @@
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pxar::Metadata;
|
||||
|
||||
use proxmox::c_result;
|
||||
use proxmox::sys::error::SysError;
|
||||
use proxmox::tools::fd::RawFdNum;
|
||||
|
||||
use pbs_tools::{acl, fs, xattr};
|
||||
|
||||
use crate::pxar::tools::perms_from_metadata;
|
||||
use crate::pxar::Flags;
|
||||
|
||||
//
|
||||
// utility functions
|
||||
//
|
||||
|
||||
fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
|
||||
if err.is_errno(Errno::EOPNOTSUPP) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
|
||||
fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
|
||||
if err.is_errno(Errno::EOPNOTSUPP) {
|
||||
*not_supp = true;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the two-element `times` array for `utimensat(2)`: slot 0 (atime) is
/// left untouched via `UTIME_OMIT`, slot 1 (mtime) is restored from the
/// archive timestamp.
fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
    // restore mtime
    // kernel sentinel: a tv_nsec of UTIME_OMIT tells utimensat to skip this slot
    const UTIME_OMIT: i64 = (1 << 30) - 2;

    [
        // [0] = atime: omitted
        libc::timespec {
            tv_sec: 0,
            tv_nsec: UTIME_OMIT,
        },
        // [1] = mtime: restored
        libc::timespec {
            tv_sec: mtime.secs,
            tv_nsec: mtime.nanos as _,
        },
    ]
}
|
||||
|
||||
//
|
||||
// metadata application:
|
||||
//
|
||||
|
||||
/// Open `file_name` relative to the directory fd `parent` and apply the
/// stored `metadata` to it.
///
/// The file is opened with `O_PATH | O_NOFOLLOW`, so symlinks are never
/// followed; `path_info` is only used for error messages.
pub fn apply_at(
    flags: Flags,
    metadata: &Metadata,
    parent: RawFd,
    file_name: &CStr,
    path_info: &Path,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    let fd = proxmox::tools::fd::Fd::openat(
        // SAFETY(review): wraps the caller-provided raw fd without taking
        // ownership — `parent` must stay open for the duration of this call
        &unsafe { RawFdNum::from_raw_fd(parent) },
        file_name,
        OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
        Mode::empty(),
    )?;

    apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
}
|
||||
|
||||
/// Apply the subset of chattr flags that need to be in place right after
/// file creation (the "initial" chattr set, see `Flags::to_initial_chattr`),
/// masked by the feature selection in `flags`.
pub fn apply_initial_flags(
    flags: Flags,
    metadata: &Metadata,
    fd: RawFd,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    // unknown flag bits stored in the archive are silently dropped
    let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
    apply_chattr(
        fd,
        entry_flags.to_initial_chattr(),
        flags.to_initial_chattr(),
    )
    .or_else(on_error)?;
    Ok(())
}
|
||||
|
||||
/// Apply the stored `metadata` to the file behind `fd`.
///
/// Restores, in order: ownership, xattrs, file capabilities, ACLs, the quota
/// project id, then mode and mtime, and finally the chattr/FAT attribute
/// flags. Each failure is routed through `on_error`, which decides whether
/// to continue (return `Ok`) or abort the restore (propagate `Err`).
pub fn apply(
    flags: Flags,
    metadata: &Metadata,
    fd: RawFd,
    path_info: &Path,
    on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
    // operate on the fd through its /proc path so plain path-based libc
    // calls (chown/chmod/utimensat) can be used on an O_PATH fd
    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

    unsafe {
        // UID and GID first, as this fails if we lose access anyway.
        c_result!(libc::chown(
            c_proc_path.as_ptr(),
            metadata.stat.uid,
            metadata.stat.gid
        ))
        .map(drop)
        .or_else(allow_notsupp)
        .map_err(|err| format_err!("failed to set ownership: {}", err))
        .or_else(&mut *on_error)?;
    }

    // once an xattr call reports EOPNOTSUPP, skip the remaining xattr-based
    // operations (including fcaps) on this file
    let mut skip_xattrs = false;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
        .or_else(&mut *on_error)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
    apply_acls(flags, &c_proc_path, metadata, path_info)
        .map_err(|err| format_err!("failed to apply acls: {}", err))
        .or_else(&mut *on_error)?;
    apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;

    // Finally mode and time. We may lose access with mode, but changing the mode also
    // affects times.
    if !metadata.is_symlink() {
        c_result!(unsafe {
            libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
        })
        .map(drop)
        .or_else(allow_notsupp)
        .map_err(|err| format_err!("failed to change file mode: {}", err))
        .or_else(&mut *on_error)?;
    }

    let res = c_result!(unsafe {
        libc::utimensat(
            libc::AT_FDCWD,
            c_proc_path.as_ptr(),
            timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
            0,
        )
    });
    match res {
        Ok(_) => (),
        // filesystem doesn't support setting timestamps — not fatal
        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
        Err(err) => {
            on_error(format_err!(
                "failed to restore mtime attribute on {:?}: {}",
                path_info,
                err
            ))?;
        }
    }

    if metadata.stat.flags != 0 {
        apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
    }

    Ok(())
}
|
||||
|
||||
/// Restore file capabilities by writing the security xattr directly.
///
/// No-op when fcaps restoration is disabled via `flags`, when the metadata
/// carries no fcaps, or when a previous xattr call already reported
/// `EOPNOTSUPP` (tracked in `skip_xattrs`).
fn add_fcaps(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
        return Ok(());
    }
    let fcaps = match metadata.fcaps.as_ref() {
        Some(fcaps) => fcaps,
        None => return Ok(()),
    };

    // SAFETY(review): c_proc_path must be a valid NUL-terminated path —
    // callers pass the CString built in `apply`
    c_result!(unsafe {
        libc::setxattr(
            c_proc_path,
            xattr::xattr_name_fcaps().as_ptr(),
            fcaps.data.as_ptr() as *const libc::c_void,
            fcaps.data.len(),
            0,
        )
    })
    .map(drop)
    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))
    .map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;

    Ok(())
}
|
||||
|
||||
/// Restore all extended attributes stored in `metadata`.
///
/// Invalid xattr names are skipped with a warning; the first `EOPNOTSUPP`
/// sets `skip_xattrs` and stops further attempts on this file.
fn apply_xattrs(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
        return Ok(());
    }

    for xattr in &metadata.xattrs {
        // set by allow_notsupp_remember on the previous iteration
        if *skip_xattrs {
            return Ok(());
        }

        if !xattr::is_valid_xattr_name(xattr.name()) {
            eprintln!("skipping invalid xattr named {:?}", xattr.name());
            continue;
        }

        // SAFETY(review): name/value pointers stay valid for the duration of
        // the call; c_proc_path is the NUL-terminated path built in `apply`
        c_result!(unsafe {
            libc::setxattr(
                c_proc_path,
                xattr.name().as_ptr() as *const libc::c_char,
                xattr.value().as_ptr() as *const libc::c_void,
                xattr.value().len(),
                0,
            )
        })
        .map(drop)
        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
        .map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
    }

    Ok(())
}
|
||||
|
||||
/// Restore POSIX ACLs (access and default) from the archive metadata.
///
/// The base mode bits (user/group/other) are always folded into the access
/// ACL, because an ACL must contain USER_OBJ/GROUP_OBJ/OTHER entries to be
/// valid. `path_info` is only used for warning messages.
fn apply_acls(
    flags: Flags,
    c_proc_path: &CStr,
    metadata: &Metadata,
    path_info: &Path,
) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
        return Ok(());
    }

    let mut acl = acl::ACL::init(5)?;

    // acl type access:
    acl.add_entry_full(
        acl::ACL_USER_OBJ,
        None,
        acl::mode_user_to_acl_permissions(metadata.stat.mode),
    )?;

    acl.add_entry_full(
        acl::ACL_OTHER,
        None,
        acl::mode_other_to_acl_permissions(metadata.stat.mode),
    )?;

    match metadata.acl.group_obj.as_ref() {
        // with a stored GROUP_OBJ, the mode's group bits act as the MASK
        Some(group_obj) => {
            acl.add_entry_full(
                acl::ACL_MASK,
                None,
                acl::mode_group_to_acl_permissions(metadata.stat.mode),
            )?;
            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
        }
        // without one, the mode's group bits become the GROUP_OBJ entry
        None => {
            let mode = acl::mode_group_to_acl_permissions(metadata.stat.mode);

            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;

            // named user/group entries require a MASK entry to be present
            if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
                eprintln!(
                    "Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
                    path_info,
                );
                acl.add_entry_full(acl::ACL_MASK, None, mode)?;
            }
        }
    }

    for user in &metadata.acl.users {
        acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
    }

    for group in &metadata.acl.groups {
        acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
    }

    if !acl.is_valid() {
        bail!("Error while restoring ACL - ACL invalid");
    }

    acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
    drop(acl);

    // acl type default:
    if let Some(default) = metadata.acl.default.as_ref() {
        let mut acl = acl::ACL::init(5)?;

        acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;

        // NO_MASK marks "no mask entry stored"
        if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
            acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
        }

        for user in &metadata.acl.default_users {
            acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
        }

        for group in &metadata.acl.default_groups {
            acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
        }

        if !acl.is_valid() {
            bail!("Error while restoring ACL - ACL invalid");
        }

        acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
    }

    Ok(())
}
|
||||
|
||||
/// Restore the ext4/xfs quota project id via the FS_IOC_FSGETXATTR /
/// FS_IOC_FSSETXATTR ioctl pair (read-modify-write, so the other fsxattr
/// fields are preserved).
fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
        return Ok(());
    }

    let projid = match metadata.quota_project_id {
        Some(projid) => projid,
        None => return Ok(()),
    };

    let mut fsxattr = fs::FSXAttr::default();
    unsafe {
        fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
            format_err!(
                "error while getting fsxattr to restore quota project id - {}",
                err
            )
        })?;

        // NOTE(review): u64 -> u32 `as` cast silently truncates; presumably
        // project ids always fit in 32 bits — confirm against the format spec
        fsxattr.fsx_projid = projid.projid as u32;

        fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
            format_err!(
                "error while setting fsxattr to restore quota project id - {}",
                err
            )
        })?;
    }

    Ok(())
}
|
||||
|
||||
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
|
||||
matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
|
||||
}
|
||||
|
||||
/// Apply the chattr bits selected by `mask` to `fd`, leaving all unmasked
/// bits at their current value (read-modify-write).
///
/// Filesystems without chattr support are skipped silently; the write is
/// also skipped when it would not change anything.
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
    if chattr == 0 {
        return Ok(());
    }

    // fetch the current attribute bits first so we only change masked ones
    let mut fattr: libc::c_long = 0;
    match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
        Ok(_) => (),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
            return Ok(());
        }
        Err(err) => bail!("failed to read file attributes: {}", err),
    }

    // requested bits within the mask, current bits outside of it
    let attr = (chattr & mask) | (fattr & !mask);

    if attr == fattr {
        return Ok(());
    }

    match unsafe { fs::write_attr_fd(fd, &attr) } {
        Ok(_) => Ok(()),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
        Err(err) => bail!("failed to set file attributes: {}", err),
    }
}
|
||||
|
||||
/// Restore the archived file flags: the chattr portion first, then the FAT
/// attributes, both restricted to what the feature selection in `flags`
/// allows.
fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
    // unknown flag bits stored in the archive are silently dropped
    let entry_flags = Flags::from_bits_truncate(entry_flags);

    apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;

    let fatattr = (flags & entry_flags).to_fat_attr();
    if fatattr != 0 {
        match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
            Ok(_) => (),
            // filesystem doesn't support FAT attributes — skip silently
            Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
            Err(err) => bail!("failed to set file FAT attributes: {}", err),
        }
    }

    Ok(())
}
|
71
pbs-client/src/pxar/mod.rs
Normal file
71
pbs-client/src/pxar/mod.rs
Normal file
@ -0,0 +1,71 @@
|
||||
//! *pxar* Implementation (proxmox file archive format)
|
||||
//!
|
||||
//! This code implements a slightly modified version of the *catar*
|
||||
//! format used in the [casync](https://github.com/systemd/casync)
|
||||
//! toolkit (we are not 100\% binary compatible). It is a file archive
|
||||
//! format defined by 'Lennart Poettering', specially defined for
|
||||
//! efficient deduplication.
|
||||
|
||||
//! Every archive contains items in the following order:
|
||||
//! * `ENTRY` -- containing general stat() data and related bits
|
||||
//! * `USER` -- user name as text, if enabled
|
||||
//! * `GROUP` -- group name as text, if enabled
|
||||
//! * `XATTR` -- one extended attribute
|
||||
//! * ... -- more of these when there are multiple defined
|
||||
//! * `ACL_USER` -- one `USER ACL` entry
|
||||
//! * ... -- more of these when there are multiple defined
|
||||
//! * `ACL_GROUP` -- one `GROUP ACL` entry
|
||||
//! * ... -- more of these when there are multiple defined
|
||||
//! * `ACL_GROUP_OBJ` -- The `ACL_GROUP_OBJ`
|
||||
//! * `ACL_DEFAULT` -- The various default ACL fields if there's one defined
|
||||
//! * `ACL_DEFAULT_USER` -- one USER ACL entry
|
||||
//! * ... -- more of these when multiple are defined
|
||||
//! * `ACL_DEFAULT_GROUP` -- one GROUP ACL entry
|
||||
//! * ... -- more of these when multiple are defined
|
||||
//! * `FCAPS` -- file capability in Linux disk format
|
||||
//! * `QUOTA_PROJECT_ID` -- the ext4/xfs quota project ID
|
||||
//! * `PAYLOAD` -- file contents, if it is one
|
||||
//! * `SYMLINK` -- symlink target, if it is one
|
||||
//! * `DEVICE` -- device major/minor, if it is a block/char device
|
||||
//!
|
||||
//! If we are serializing a directory, then this is followed by:
|
||||
//!
|
||||
//! * `FILENAME` -- name of the first directory entry (strictly ordered!)
|
||||
//! * `<archive>` -- serialization of the first directory entry's metadata and contents,
|
||||
//! following the exact same archive format
|
||||
//! * `FILENAME` -- name of the second directory entry (strictly ordered!)
|
||||
//! * `<archive>` -- serialization of the second directory entry
|
||||
//! * ...
|
||||
//! * `GOODBYE` -- lookup table at the end of a list of directory entries
|
||||
|
||||
//!
|
||||
//! The original format has no way to deal with hardlinks, so we
|
||||
//! extended the format by a special `HARDLINK` tag, which can replace
|
||||
//! an `ENTRY` tag. The `HARDLINK` tag contains an 64bit offset which
|
||||
//! points to the linked `ENTRY` inside the archive, followed by the
|
||||
//! full path name of that `ENTRY`. `HARDLINK`s may not have further data
|
||||
//! (user, group, acl, ...) because this is already defined by the
|
||||
//! linked `ENTRY`.
|
||||
|
||||
pub(crate) mod create;
|
||||
pub(crate) mod dir_stack;
|
||||
pub(crate) mod extract;
|
||||
pub mod fuse;
|
||||
pub(crate) mod metadata;
|
||||
pub(crate) mod tools;
|
||||
|
||||
mod flags;
|
||||
pub use flags::Flags;
|
||||
|
||||
pub use create::{create_archive, PxarCreateOptions};
|
||||
pub use extract::{
|
||||
create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
|
||||
PxarExtractOptions,
|
||||
};
|
||||
|
||||
/// The format requires to build sorted directory lookup tables in
|
||||
/// memory, so we restrict the number of allowed entries to limit
|
||||
/// maximum memory usage.
|
||||
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
|
||||
|
||||
pub use tools::{format_multi_line_entry, format_single_line_entry};
|
202
pbs-client/src/pxar/tools.rs
Normal file
202
pbs-client/src/pxar/tools.rs
Normal file
@ -0,0 +1,202 @@
|
||||
//! Some common methods used within the pxar code.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
|
||||
|
||||
/// Get the file permissions as `nix::Mode`
|
||||
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
|
||||
let mode = meta.stat.get_permission_bits();
|
||||
u32::try_from(mode)
|
||||
.map_err(drop)
|
||||
.and_then(|mode| Mode::from_bits(mode).ok_or(()))
|
||||
.map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
|
||||
}
|
||||
|
||||
/// Make sure path is relative and not '.' or '..'.
|
||||
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
|
||||
assert_relative_path_do(Path::new(path))
|
||||
}
|
||||
|
||||
/// Make sure path is a single component and not '.' or '..'.
|
||||
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
|
||||
assert_single_path_component_do(Path::new(path))
|
||||
}
|
||||
|
||||
fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
|
||||
if !path.is_relative() {
|
||||
bail!("bad absolute file name in archive: {:?}", path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
|
||||
assert_relative_path_do(path)?;
|
||||
|
||||
let mut components = path.components();
|
||||
match components.next() {
|
||||
Some(std::path::Component::Normal(_)) => (),
|
||||
_ => bail!("invalid path component in archive: {:?}", path),
|
||||
}
|
||||
|
||||
if components.next().is_some() {
|
||||
bail!(
|
||||
"invalid path with multiple components in archive: {:?}",
|
||||
path
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Render one permission triplet (`rwx` bits in `c`) as `ls`-style bytes.
///
/// `special` marks a setuid/setgid/sticky bit affecting this triplet: the
/// execute slot then shows `special_x` (e.g. `s`/`t`) when execute is set,
/// or `special_no_x` (e.g. `S`/`T`) when it is not.
fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
    let read = if c & 4 != 0 { b'r' } else { b'-' };
    let write = if c & 2 != 0 { b'w' } else { b'-' };
    let exec = match (c & 1 != 0, special) {
        (false, false) => b'-',
        (false, true) => special_no_x,
        (true, false) => b'x',
        (true, true) => special_x,
    };
    [read, write, exec]
}
|
||||
|
||||
fn mode_string(entry: &Entry) -> String {
|
||||
// https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
|
||||
// additionally we use:
|
||||
// file type capital 'L' hard links
|
||||
// a second '+' after the mode to show non-acl xattr presence
|
||||
//
|
||||
// Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mode = meta.stat.mode;
|
||||
let type_char = if entry.is_hardlink() {
|
||||
'L'
|
||||
} else {
|
||||
match mode & mode::IFMT {
|
||||
mode::IFREG => '-',
|
||||
mode::IFBLK => 'b',
|
||||
mode::IFCHR => 'c',
|
||||
mode::IFDIR => 'd',
|
||||
mode::IFLNK => 'l',
|
||||
mode::IFIFO => 'p',
|
||||
mode::IFSOCK => 's',
|
||||
_ => '?',
|
||||
}
|
||||
};
|
||||
|
||||
let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
|
||||
let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
|
||||
let fmt_o = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISVTX, b't', b'T');
|
||||
|
||||
let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
|
||||
|
||||
let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
|
||||
|
||||
format!(
|
||||
"{}{}{}{}{}{}",
|
||||
type_char,
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_u) },
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_g) },
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_o) },
|
||||
has_acls,
|
||||
has_xattrs,
|
||||
)
|
||||
}
|
||||
|
||||
fn format_mtime(mtime: &StatxTimestamp) -> String {
|
||||
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
|
||||
return s;
|
||||
}
|
||||
format!("{}.{}", mtime.secs, mtime.nanos)
|
||||
}
|
||||
|
||||
pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
|
||||
let (size, link) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
|
||||
EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
|
||||
EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
|
||||
EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
|
||||
_ => ("0".to_string(), String::new()),
|
||||
};
|
||||
|
||||
format!(
|
||||
"{} {:<13} {} {:>8} {:?}{}",
|
||||
mode_string,
|
||||
format!("{}/{}", meta.stat.uid, meta.stat.gid),
|
||||
format_mtime(&meta.stat.mtime),
|
||||
size,
|
||||
entry.path(),
|
||||
link,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn format_multi_line_entry(entry: &Entry) -> String {
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
|
||||
let (size, link, type_name) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
|
||||
EntryKind::Symlink(link) => (
|
||||
"0".to_string(),
|
||||
format!(" -> {:?}", link.as_os_str()),
|
||||
"symlink",
|
||||
),
|
||||
EntryKind::Hardlink(link) => (
|
||||
"0".to_string(),
|
||||
format!(" -> {:?}", link.as_os_str()),
|
||||
"symlink",
|
||||
),
|
||||
EntryKind::Device(dev) => (
|
||||
format!("{},{}", dev.major, dev.minor),
|
||||
String::new(),
|
||||
if meta.stat.is_chardev() {
|
||||
"characters pecial file"
|
||||
} else if meta.stat.is_blockdev() {
|
||||
"block special file"
|
||||
} else {
|
||||
"device"
|
||||
},
|
||||
),
|
||||
EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
|
||||
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
|
||||
EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
|
||||
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
|
||||
};
|
||||
|
||||
let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
|
||||
Ok(name) => std::borrow::Cow::Borrowed(name),
|
||||
Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
|
||||
};
|
||||
|
||||
format!(
|
||||
" File: {}{}\n \
|
||||
Size: {:<13} Type: {}\n\
|
||||
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
|
||||
Modify: {}\n",
|
||||
file_name,
|
||||
link,
|
||||
size,
|
||||
type_name,
|
||||
meta.file_mode(),
|
||||
mode_string,
|
||||
meta.stat.uid,
|
||||
meta.stat.gid,
|
||||
format_mtime(&meta.stat.mtime),
|
||||
)
|
||||
}
|
Reference in New Issue
Block a user