//! Tools and utilities
//!
//! This is a collection of small and useful tools.
use failure::*;
use nix::unistd;
use nix::sys::stat;

use lazy_static::lazy_static;

use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::Path;
use std::io::Read;
use std::io::ErrorKind;
use std::time::Duration;

use std::any::Any;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::collections::HashMap;

use serde_json::Value;

pub mod async_mutex;
pub mod timer;
pub mod wrapped_reader_stream;
#[macro_use]
pub mod common_regex;
pub mod ticket;
pub mod borrow;
pub mod fs;
pub mod tty;
pub mod signalfd;
pub mod daemon;
pub mod procfs;
pub mod read;
pub mod write;
pub mod acl;
pub mod xattr;

mod process_locker;
pub use process_locker::*;

#[macro_use]
mod file_logger;
pub use file_logger::*;

mod broadcast_future;
pub use broadcast_future::*;

/// Macro to write error-handling blocks (like perl eval {})
///
/// #### Example:
/// ```
/// # #[macro_use] extern crate proxmox_backup;
/// # use failure::*;
/// # let some_condition = false;
/// let result = try_block!({
///     if (some_condition) {
///         bail!("some error");
///     }
///     Ok(())
/// })
/// .map_err(|e| format_err!("my try block returned an error - {}", e));
/// ```
#[macro_export]
macro_rules! try_block {
    { $($token:tt)* } => {{ (|| -> Result<_,_> { $($token)* })() }}
}

/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
/// purpose of this trait is to avoid unnecessary data copies.
pub trait BufferedRead {
    /// This function tries to fill the internal buffer, then
    /// returns a reference to the available data. It returns an empty
    /// buffer if `offset` points to the end of the file.
    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}

/// Directly map a type `T` onto a binary buffer. This is mostly useful
/// for reading structured data from a byte stream (file). You need to
/// make sure that the buffer location does not change, so please
/// avoid vec resize while you use such a map.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct<T>(buffer: &[u8]) -> Result<&T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &*(buffer.as_ptr() as *const T) })
}
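
// Minimal usage sketch for `map_struct`: map a small, purely illustrative
// `ExampleHeader` layout onto a byte buffer without copying. The struct name,
// layout and magic value are made up for this example; only types with
// alignment 1 (plain byte fields) are safe to map onto an arbitrary `&[u8]`
// like this.
#[cfg(test)]
mod map_struct_example {
    use super::*;

    #[repr(C)]
    struct ExampleHeader {
        magic: [u8; 4],
        version: u8,
    }

    #[test]
    fn map_header_from_buffer() -> Result<(), Error> {
        let buffer = b"PBKP\x01payload...";

        // The returned reference points directly into `buffer`.
        let header: &ExampleHeader = map_struct(&buffer[..])?;
        assert_eq!(header.magic, *b"PBKP");
        assert_eq!(header.version, 1);

        // A buffer smaller than the target type is rejected with an error.
        assert!(map_struct::<ExampleHeader>(&buffer[..3]).is_err());
        Ok(())
    }
}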
/// Directly map a type `T` onto a mutable binary buffer. This is mostly
/// useful for writing structured data into a byte stream (file). You
/// need to make sure that the buffer location does not change, so
/// please avoid vec resize while you use such a map.
///
/// This function fails if the buffer is not large enough.
pub fn map_struct_mut<T>(buffer: &mut [u8]) -> Result<&mut T, Error> {
    if buffer.len() < ::std::mem::size_of::<T>() {
        bail!("unable to map struct - buffer too small");
    }
    Ok(unsafe { &mut *(buffer.as_mut_ptr() as *mut T) })
}

pub fn file_read_firstline<P: AsRef<Path>>(path: P) -> Result<String, Error> {

    let path = path.as_ref();

    try_block!({
        let file = std::fs::File::open(path)?;

        use std::io::{BufRead, BufReader};

        let mut reader = BufReader::new(file);

        let mut line = String::new();

        let _ = reader.read_line(&mut line)?;

        Ok(line)
    }).map_err(|err: Error| format_err!("unable to read {:?} - {}", path, err))
}

pub fn file_get_contents<P: AsRef<Path>>(path: P) -> Result<Vec<u8>, Error> {

    let path = path.as_ref();

    try_block!({
        std::fs::read(path)
    }).map_err(|err| format_err!("unable to read {:?} - {}", path, err))
}

pub fn file_get_json<P: AsRef<Path>>(path: P, default: Option<Value>) -> Result<Value, Error> {

    let path = path.as_ref();

    let raw = match std::fs::read(path) {
        Ok(v) => v,
        Err(err) => {
            if err.kind() == std::io::ErrorKind::NotFound {
                if let Some(v) = default {
                    return Ok(v);
                }
            }
            bail!("unable to read json {:?} - {}", path, err);
        }
    };

    try_block!({
        let data = String::from_utf8(raw)?;
        let json = serde_json::from_str(&data)?;
        Ok(json)
    }).map_err(|err: Error| format_err!("unable to parse json from {:?} - {}", path, err))
}

/// Atomically write a file
///
/// We first create a temporary file, which is then renamed.
pub fn file_set_contents<P: AsRef<Path>>(
    path: P,
    data: &[u8],
    perm: Option<stat::Mode>,
) -> Result<(), Error> {
    file_set_contents_full(path, data, perm, None, None)
}

/// Atomically write a file with owner and group
pub fn file_set_contents_full<P: AsRef<Path>>(
    path: P,
    data: &[u8],
    perm: Option<stat::Mode>,
    owner: Option<unistd::Uid>,
    group: Option<unistd::Gid>,
) -> Result<(), Error> {

    let path = path.as_ref();

    // Note: we use mkstemp here, because this works with different
    // processes, threads, and even tokio tasks.
    let mut template = path.to_owned();
    template.set_extension("tmp_XXXXXX");
    let (fd, tmp_path) = match unistd::mkstemp(&template) {
        Ok((fd, path)) => (fd, path),
        Err(err) => bail!("mkstemp {:?} failed: {}", template, err),
    };

    let tmp_path = tmp_path.as_path();

    let mode: stat::Mode = perm.unwrap_or(stat::Mode::from(
        stat::Mode::S_IRUSR | stat::Mode::S_IWUSR |
        stat::Mode::S_IRGRP | stat::Mode::S_IROTH
    ));

    if let Err(err) = stat::fchmod(fd, mode) {
        let _ = unistd::unlink(tmp_path);
        bail!("fchmod {:?} failed: {}", tmp_path, err);
    }

    if owner != None || group != None {
        if let Err(err) = fchown(fd, owner, group) {
            let _ = unistd::unlink(tmp_path);
            bail!("fchown {:?} failed: {}", tmp_path, err);
        }
    }

    use std::os::unix::io::FromRawFd;
    let mut file = unsafe { File::from_raw_fd(fd) };

    if let Err(err) = file.write_all(data) {
        let _ = unistd::unlink(tmp_path);
        bail!("write failed: {}", err);
    }

    if let Err(err) = std::fs::rename(tmp_path, path) {
        let _ = unistd::unlink(tmp_path);
        bail!("Atomic rename failed for file {:?} - {}", path, err);
    }

    Ok(())
}
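
// Minimal usage sketch for the atomic file helpers, assuming a writable temp
// directory: the payload is written to a temporary file next to the target and
// only becomes visible under the final name after the rename. The file name
// used here is made up for the example.
#[cfg(test)]
mod file_set_contents_example {
    use super::*;

    #[test]
    fn write_atomically_and_read_back() -> Result<(), Error> {
        let path = std::env::temp_dir().join("proxmox-tools-atomic-write-example.txt");

        // Write with the default permissions (0644) ...
        file_set_contents(&path, b"atomic payload", None)?;

        // ... and read the complete contents back.
        let data = file_get_contents(&path)?;
        assert_eq!(&data[..], &b"atomic payload"[..]);

        let _ = std::fs::remove_file(&path);
        Ok(())
    }
}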
/// Create a file lock using fcntl. This function allows you to specify
/// a timeout if you want to avoid infinite blocking.
pub fn lock_file<F: AsRawFd>(
    file: &mut F,
    exclusive: bool,
    timeout: Option<Duration>,
) -> Result<(), Error> {
    let lockarg = if exclusive {
        nix::fcntl::FlockArg::LockExclusive
    } else {
        nix::fcntl::FlockArg::LockShared
    };

    let timeout = match timeout {
        None => {
            nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
            return Ok(());
        }
        Some(t) => t,
    };

    // unblock the timeout signal temporarily
    let _sigblock_guard = timer::unblock_timeout_signal();

    // setup a timeout timer
    let mut timer = timer::Timer::create(
        timer::Clock::Realtime,
        timer::TimerEvent::ThisThreadSignal(timer::SIGTIMEOUT))?;

    timer.arm(timer::TimerSpec::new()
        .value(Some(timeout))
        .interval(Some(Duration::from_millis(10))))?;

    nix::fcntl::flock(file.as_raw_fd(), lockarg)?;
    Ok(())
}

/// Open or create a lock file (append mode). Then try to
/// acquire a lock using `lock_file()`.
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {

    let path = path.as_ref();

    let mut file = match OpenOptions::new().create(true).append(true).open(path) {
        Ok(file) => file,
        Err(err) => bail!("Unable to open lock {:?} - {}", path, err),
    };

    match lock_file(&mut file, true, Some(timeout)) {
        Ok(_) => Ok(file),
        Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
    }
}

/// Split a file into equal sized chunks. The last chunk may be
/// smaller. Note: We cannot implement an `Iterator`, because iterators
/// cannot return a borrowed buffer ref (we want zero-copy).
pub fn file_chunker<C, R>(
    mut file: R,
    chunk_size: usize,
    mut chunk_cb: C,
) -> Result<(), Error>
    where C: FnMut(usize, &[u8]) -> Result<bool, Error>,
          R: Read,
{
    const READ_BUFFER_SIZE: usize = 4*1024*1024; // 4M

    if chunk_size > READ_BUFFER_SIZE { bail!("chunk size too large!"); }

    let mut buf = vec![0u8; READ_BUFFER_SIZE];

    let mut pos = 0;
    let mut file_pos = 0;
    loop {
        let mut eof = false;
        // try to read large portions, at least chunk_size, appending
        // after any leftover bytes from the previous iteration
        while pos < chunk_size {
            match file.read(&mut buf[pos..]) {
                Ok(0) => { eof = true; break; },
                Ok(n) => {
                    pos += n;
                    if pos > chunk_size { break; }
                }
                Err(ref e) if e.kind() == ErrorKind::Interrupted => { /* try again */ }
                Err(e) => bail!("read chunk failed - {}", e.to_string()),
            }
        }
        let mut start = 0;
        while start + chunk_size <= pos {
            if !(chunk_cb)(file_pos, &buf[start..start+chunk_size])? { break; }
            file_pos += chunk_size;
            start += chunk_size;
        }
        if eof {
            if start < pos {
                (chunk_cb)(file_pos, &buf[start..pos])?;
                //file_pos += pos - start;
            }
            break;
        } else {
            let rest = pos - start;
            if rest > 0 {
                let ptr = buf.as_mut_ptr();
                unsafe { std::ptr::copy_nonoverlapping(ptr.add(start), ptr, rest); }
                pos = rest;
            } else {
                pos = 0;
            }
        }
    }

    Ok(())
}

/// Returns the Unix uid/gid for the specified system user.
pub fn getpwnam_ugid(username: &str) -> Result<(libc::uid_t, libc::gid_t), Error> {
    let info = unsafe { libc::getpwnam(std::ffi::CString::new(username).unwrap().as_ptr()) };
    if info == std::ptr::null_mut() {
        bail!("getpwnam '{}' failed", username);
    }

    let info = unsafe { *info };

    Ok((info.pw_uid, info.pw_gid))
}
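
// Minimal usage sketch for `file_chunker`, using an in-memory reader instead of
// a real file: 10 bytes with a chunk size of 4 yield two full chunks plus a
// smaller trailing chunk. Returning `Ok(false)` from the callback would stop
// the chunking early.
#[cfg(test)]
mod file_chunker_example {
    use super::*;

    #[test]
    fn chunk_in_memory_reader() -> Result<(), Error> {
        let data = b"0123456789";
        let mut chunks: Vec<(usize, Vec<u8>)> = Vec::new();

        file_chunker(&data[..], 4, |offset, chunk| {
            chunks.push((offset, chunk.to_vec()));
            Ok(true) // keep going
        })?;

        assert_eq!(chunks.len(), 3);
        assert_eq!(chunks[0], (0, b"0123".to_vec()));
        assert_eq!(chunks[1], (4, b"4567".to_vec()));
        assert_eq!(chunks[2], (8, b"89".to_vec()));
        Ok(())
    }
}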
/// Creates a directory at the provided path with the specified ownership.
///
/// Simply returns if the directory already exists.
pub fn create_dir_chown<P: AsRef<Path>>(
    path: P,
    perm: Option<stat::Mode>,
    owner: Option<unistd::Uid>,
    group: Option<unistd::Gid>,
) -> Result<(), nix::Error>
{
    let mode: stat::Mode = perm.unwrap_or(stat::Mode::from_bits_truncate(0o770));

    let path = path.as_ref();

    match nix::unistd::mkdir(path, mode) {
        Ok(()) => {},
        Err(nix::Error::Sys(nix::errno::Errno::EEXIST)) => {
            return Ok(());
        },
        err => return err,
    }

    unistd::chown(path, owner, group)?;

    Ok(())
}

/// Change ownership of an open file handle
pub fn fchown(
    fd: RawFd,
    owner: Option<unistd::Uid>,
    group: Option<unistd::Gid>,
) -> Result<(), Error> {
    // According to the POSIX specification, -1 is used to indicate that owner and group
    // are not to be changed. Since uid_t and gid_t are unsigned types, we have to wrap
    // around to get -1 (copied from the nix crate).
    let uid = owner.map(Into::into).unwrap_or((0 as libc::uid_t).wrapping_sub(1));
    let gid = group.map(Into::into).unwrap_or((0 as libc::gid_t).wrapping_sub(1));

    let res = unsafe { libc::fchown(fd, uid, gid) };
    nix::errno::Errno::result(res)?;

    Ok(())
}

/// Returns the host's node name (UTS node name)
pub fn nodename() -> &'static str {

    lazy_static!{
        static ref NODENAME: String = {
            nix::sys::utsname::uname()
                .nodename()
                .split('.')
                .next()
                .unwrap()
                .to_owned()
        };
    }

    &NODENAME
}

pub fn json_object_to_query(data: Value) -> Result<String, Error> {

    let mut query = url::form_urlencoded::Serializer::new(String::new());

    let object = data.as_object().ok_or_else(|| {
        format_err!("json_object_to_query: got wrong data type (expected object).")
    })?;

    for (key, value) in object {
        match value {
            Value::Bool(b) => { query.append_pair(key, &b.to_string()); }
            Value::Number(n) => { query.append_pair(key, &n.to_string()); }
            Value::String(s) => { query.append_pair(key, &s); }
            Value::Array(arr) => {
                for element in arr {
                    match element {
                        Value::Bool(b) => { query.append_pair(key, &b.to_string()); }
                        Value::Number(n) => { query.append_pair(key, &n.to_string()); }
                        Value::String(s) => { query.append_pair(key, &s); }
                        _ => bail!("json_object_to_query: unable to handle complex array data types."),
                    }
                }
            }
            _ => bail!("json_object_to_query: unable to handle complex data types."),
        }
    }

    Ok(query.finish())
}

pub fn required_string_param<'a>(param: &'a Value, name: &str) -> Result<&'a str, Error> {
    match param[name].as_str() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_integer_param<'a>(param: &'a Value, name: &str) -> Result<i64, Error> {
    match param[name].as_i64() {
        Some(s) => Ok(s),
        None => bail!("missing parameter '{}'", name),
    }
}

pub fn required_array_param<'a>(param: &'a Value, name: &str) -> Result<Vec<Value>, Error> {
    match param[name].as_array() {
        Some(s) => Ok(s.to_vec()),
        None => bail!("missing parameter '{}'", name),
    }
}
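
// Minimal usage sketch for the JSON parameter helpers. The parameter names and
// values below are made up for the example; only the scalar/array handling and
// the error behaviour of the `required_*_param` helpers are demonstrated.
#[cfg(test)]
mod query_param_example {
    use super::*;
    use serde_json::json;

    #[test]
    fn object_to_query_and_params() -> Result<(), Error> {
        let param = json!({
            "backup-id": "vm-100",
            "verbose": true,
            "limit": 3,
        });

        // Scalar members are serialized as url-encoded key=value pairs.
        let query = json_object_to_query(param.clone())?;
        assert!(query.contains("backup-id=vm-100"));
        assert!(query.contains("verbose=true"));

        // The `required_*_param` helpers bail out with a descriptive error if missing.
        assert_eq!(required_string_param(&param, "backup-id")?, "vm-100");
        assert_eq!(required_integer_param(&param, "limit")?, 3);
        assert!(required_string_param(&param, "missing").is_err());

        Ok(())
    }
}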
pub fn complete_file_name(arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    use nix::fcntl::OFlag;
    use nix::sys::stat::Mode;
    use nix::fcntl::AtFlags;

    let mut dirname = std::path::PathBuf::from(if arg.len() == 0 { "./" } else { arg });

    let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
        Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
        Err(_) => false,
    };

    if !is_dir {
        if let Some(parent) = dirname.parent() {
            dirname = parent.to_owned();
        }
    }

    let mut dir = match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
        Ok(d) => d,
        Err(_) => return result,
    };

    for item in dir.iter() {
        if let Ok(entry) = item {
            if let Ok(name) = entry.file_name().to_str() {
                if name == "." || name == ".." { continue; }
                let mut newpath = dirname.clone();
                newpath.push(name);

                if let Ok(stat) = nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty()) {
                    if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
                        newpath.push("");
                        if let Some(newpath) = newpath.to_str() {
                            result.push(newpath.to_owned());
                        }
                        continue;
                    }
                }
                if let Some(newpath) = newpath.to_str() {
                    result.push(newpath.to_owned());
                }
            }
        }
    }

    result
}

/// Scan directory for matching file names.
///
/// Scan through all directory entries and call `callback()` function
/// if the entry name matches the regular expression. This function
/// uses unix `openat()`, so you can pass absolute or relative file
/// names. This function simply skips non-UTF8 encoded names.
pub fn scandir<P, F>(
    dirfd: RawFd,
    path: &P,
    regex: &regex::Regex,
    mut callback: F,
) -> Result<(), Error>
    where F: FnMut(RawFd, &str, nix::dir::Type) -> Result<(), Error>,
          P: ?Sized + nix::NixPath,
{
    for entry in self::fs::scan_subdir(dirfd, path, regex)? {
        let entry = entry?;
        let file_type = match entry.file_type() {
            Some(file_type) => file_type,
            None => bail!("unable to detect file type"),
        };

        callback(entry.parent_fd(), unsafe { entry.file_name_utf8_unchecked() }, file_type)?;
    }
    Ok(())
}

pub fn get_hardware_address() -> Result<String, Error> {

    static FILENAME: &str = "/etc/ssh/ssh_host_rsa_key.pub";

    let contents = file_get_contents(FILENAME)?;
    let digest = md5::compute(contents);

    Ok(format!("{:0x}", digest))
}

const HEX_CHARS: &'static [u8; 16] = b"0123456789abcdef";

pub fn digest_to_hex(digest: &[u8]) -> String {

    let mut buf = Vec::<u8>::with_capacity(digest.len()*2);

    for i in 0..digest.len() {
        buf.push(HEX_CHARS[(digest[i] >> 4) as usize]);
        buf.push(HEX_CHARS[(digest[i] & 0xf) as usize]);
    }

    unsafe { String::from_utf8_unchecked(buf) }
}

pub fn hex_to_digest(hex: &str) -> Result<[u8; 32], Error> {
    let mut digest = [0u8; 32];

    let bytes = hex.as_bytes();

    if bytes.len() != 64 { bail!("got wrong digest length."); }

    let val = |c| {
        if c >= b'0' && c <= b'9' { return Ok(c - b'0'); }
        if c >= b'a' && c <= b'f' { return Ok(c - b'a' + 10); }
        if c >= b'A' && c <= b'F' { return Ok(c - b'A' + 10); }
        bail!("found illegal hex character.");
    };

    let mut pos = 0;
    for pair in bytes.chunks(2) {
        if pos >= digest.len() { bail!("hex digest too long."); }
        let h = val(pair[0])?;
        let l = val(pair[1])?;
        digest[pos] = (h << 4) | l;
        pos += 1;
    }

    if pos != digest.len() { bail!("hex digest too short."); }

    Ok(digest)
}

pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
    if digest1 != digest2 {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}
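
// Minimal usage sketch for the hex digest helpers: a 32 byte (e.g. SHA-256
// sized) digest encodes to 64 lowercase hex characters and decodes back to the
// original bytes. The digest value is arbitrary example data.
#[cfg(test)]
mod hex_digest_example {
    use super::*;

    #[test]
    fn hex_round_trip() -> Result<(), Error> {
        let digest = [0xabu8; 32];

        let hex = digest_to_hex(&digest);
        assert_eq!(hex.len(), 64);
        assert_eq!(&hex[..4], "abab");

        let decoded = hex_to_digest(&hex)?;
        assert_eq!(decoded, digest);

        // Anything that is not exactly 64 hex characters is rejected.
        assert!(hex_to_digest("deadbeef").is_err());
        Ok(())
    }
}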
/// Extract authentication cookie from cookie header.
/// We assume cookie_name is already url encoded.
pub fn extract_auth_cookie(cookie: &str, cookie_name: &str) -> Option<String> {

    for pair in cookie.split(';') {

        let (name, value) = match pair.find('=') {
            Some(i) => (pair[..i].trim(), pair[(i + 1)..].trim()),
            None => return None, // Cookie format error
        };

        if name == cookie_name {
            use url::percent_encoding::percent_decode;
            if let Ok(value) = percent_decode(value.as_bytes()).decode_utf8() {
                return Some(value.into());
            } else {
                return None; // Cookie format error
            }
        }
    }

    None
}

pub fn join(data: &Vec<String>, sep: char) -> String {

    let mut list = String::new();

    for item in data {
        if !list.is_empty() { list.push(sep); }
        list.push_str(item);
    }

    list
}

/// Normalize a uri path
///
/// Do not allow ".", "..", or hidden files ".XXXX".
/// Also remove empty path components.
pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {

    let items = path.split('/');

    let mut path = String::new();
    let mut components = vec![];

    for name in items {
        if name.is_empty() { continue; }
        if name.starts_with(".") {
            bail!("Path contains illegal components.");
        }
        path.push('/');
        path.push_str(name);
        components.push(name);
    }

    Ok((path, components))
}

pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
    use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
    let mut flags = FdFlag::from_bits(fcntl(fd, F_GETFD)?)
        .ok_or_else(|| format_err!("unhandled file flags"))?; // nix crate is stupid this way...
    flags.set(FdFlag::FD_CLOEXEC, on);
    fcntl(fd, F_SETFD(flags))?;
    Ok(())
}

static mut SHUTDOWN_REQUESTED: bool = false;

pub fn request_shutdown() {
    unsafe { SHUTDOWN_REQUESTED = true; }
    crate::server::server_shutdown();
}

#[inline(always)]
pub fn shutdown_requested() -> bool {
    unsafe { SHUTDOWN_REQUESTED }
}

pub fn fail_on_shutdown() -> Result<(), Error> {
    if shutdown_requested() {
        bail!("Server shutdown requested - aborting task");
    }
    Ok(())
}

/// Guard a raw file descriptor with a drop handler. This is mostly useful when access to an owned
/// `RawFd` is required without the corresponding handler object (such as when only the file
/// descriptor number is required in a closure which may be dropped instead of being executed).
pub struct Fd(pub RawFd);

impl Drop for Fd {
    fn drop(&mut self) {
        if self.0 != -1 {
            unsafe { libc::close(self.0); }
        }
    }
}

impl AsRawFd for Fd {
    fn as_raw_fd(&self) -> RawFd {
        self.0
    }
}

impl IntoRawFd for Fd {
    fn into_raw_fd(mut self) -> RawFd {
        let fd = self.0;
        self.0 = -1;
        fd
    }
}

impl FromRawFd for Fd {
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        Self(fd)
    }
}

// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
pub fn pipe() -> Result<(Fd, Fd), Error> {
    let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
    Ok((Fd(pin), Fd(pout)))
}

/// An easy way to convert types to Any
///
/// Mostly useful to downcast trait objects (see RpcEnvironment).
pub trait AsAny {
    fn as_any(&self) -> &dyn Any;
}

impl<T: Any> AsAny for T {
    fn as_any(&self) -> &dyn Any {
        self
    }
}
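
// Minimal usage sketch for `normalize_uri_path` and `extract_auth_cookie`. The
// paths and the cookie name/value are example data chosen for illustration.
#[cfg(test)]
mod path_and_cookie_example {
    use super::*;

    #[test]
    fn normalize_and_extract() -> Result<(), Error> {
        // Empty components are dropped; the normalized path and its components
        // are returned together.
        let (path, components) = normalize_uri_path("/api2//json/version")?;
        assert_eq!(path, "/api2/json/version");
        assert_eq!(components, vec!["api2", "json", "version"]);

        // Hidden or relative components are rejected.
        assert!(normalize_uri_path("/api2/../secret").is_err());

        // Cookie values are url-decoded before they are returned.
        let cookie = "PBSAuthCookie=user%40pam; other=1";
        assert_eq!(
            extract_auth_cookie(cookie, "PBSAuthCookie"),
            Some("user@pam".to_string())
        );
        Ok(())
    }
}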