move client to pbs-client subcrate

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>

commit 2b7f8dd5ea
parent 72fbe9ffa5
Author: Wolfgang Bumiller
Date: 2021-07-19 10:50:18 +02:00

74 changed files with 802 additions and 753 deletions

pbs-client/Cargo.toml (new file)

@@ -0,0 +1,40 @@
[package]
name = "pbs-client"
version = "0.1.0"
authors = ["Wolfgang Bumiller <w.bumiller@proxmox.com>"]
edition = "2018"
description = "The main proxmox backup client crate"
[dependencies]
anyhow = "1.0"
bitflags = "1.2.1"
bytes = "1.0"
futures = "0.3"
h2 = { version = "0.3", features = [ "stream" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
openssl = "0.10"
percent-encoding = "2.1"
pin-project = "1.0"
regex = "1.2"
rustyline = "7"
serde_json = "1.0"
tokio = { version = "1.6", features = [ "fs", "signal" ] }
tokio-stream = "0.1.0"
tower-service = "0.3.0"
xdg = "2.2"
pathpatterns = "0.1.2"
proxmox = { version = "0.11.5", default-features = false, features = [ "cli" ] }
proxmox-fuse = "0.1.1"
proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pbs-api-types = { path = "../pbs-api-types" }
pbs-buildcfg = { path = "../pbs-buildcfg" }
pbs-datastore = { path = "../pbs-datastore" }
pbs-runtime = { path = "../pbs-runtime" }
pbs-tools = { path = "../pbs-tools" }


@@ -0,0 +1,229 @@
use anyhow::{format_err, Error};
use std::io::{Write, Seek, SeekFrom};
use std::fs::File;
use std::sync::Arc;
use std::os::unix::fs::OpenOptionsExt;
use futures::future::AbortHandle;
use serde_json::{json, Value};
use proxmox::tools::digest_to_hex;
use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, CryptConfig, BackupManifest};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
use pbs_tools::sha::sha256;
use super::{HttpClient, H2Client};
/// Backup Reader
pub struct BackupReader {
h2: H2Client,
abort: AbortHandle,
crypt_config: Option<Arc<CryptConfig>>,
}
impl Drop for BackupReader {
fn drop(&mut self) {
self.abort.abort();
}
}
impl BackupReader {
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
Arc::new(Self { h2, abort, crypt_config})
}
/// Create a new instance by upgrading the connection at '/api2/json/reader'
pub async fn start(
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
backup_type: &str,
backup_id: &str,
backup_time: i64,
debug: bool,
) -> Result<Arc<BackupReader>, Error> {
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"backup-time": backup_time,
"store": datastore,
"debug": debug,
});
let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
Ok(BackupReader::new(h2, abort, crypt_config))
}
/// Execute a GET request
pub async fn get(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.get(path, param).await
}
/// Execute a PUT request
pub async fn put(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.put(path, param).await
}
/// Execute a POST request
pub async fn post(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.post(path, param).await
}
/// Execute a GET request and send output to a writer
pub async fn download<W: Write + Send>(
&self,
file_name: &str,
output: W,
) -> Result<(), Error> {
let path = "download";
let param = json!({ "file-name": file_name });
self.h2.download(path, Some(param), output).await
}
/// Execute a special GET request and send output to a writer
///
/// This writes random data, and is only useful to test download speed.
pub async fn speedtest<W: Write + Send>(
&self,
output: W,
) -> Result<(), Error> {
self.h2.download("speedtest", None, output).await
}
/// Download a specific chunk
pub async fn download_chunk<W: Write + Send>(
&self,
digest: &[u8; 32],
output: W,
) -> Result<(), Error> {
let path = "chunk";
let param = json!({ "digest": digest_to_hex(digest) });
self.h2.download(path, Some(param), output).await
}
pub fn force_close(self) {
self.abort.abort();
}
/// Download backup manifest (index.json)
///
/// The manifest signature is verified if we have a crypt_config.
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(None, None)?;
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok((manifest, data))
}
/// Download a .blob file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
/// the provided manifest.
pub async fn download_blob(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<DataBlobReader<'_, File>, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
tmpfile.seek(SeekFrom::Start(0))?;
let (csum, size) = sha256(&mut tmpfile)?;
manifest.verify_file(name, &csum, size)?;
tmpfile.seek(SeekFrom::Start(0))?;
DataBlobReader::new(tmpfile, self.crypt_config.clone())
}
/// Download dynamic index file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_dynamic_index(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
// Note: do not use values stored in index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(name, &csum, size)?;
Ok(index)
}
/// Download fixed index file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_fixed_index(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
// Note: do not use values stored in index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(name, &csum, size)?;
Ok(index)
}
}
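A minimal usage sketch of this reader API (hypothetical values; assumes a reachable server, with `HttpClientOptions` as used in lib.rs below and `BackupManifest::files()` from pbs-datastore):

```rust
use anyhow::Error;
use pbs_api_types::Authid;
use pbs_client::{BackupReader, HttpClient, HttpClientOptions};

async fn fetch_manifest() -> Result<(), Error> {
    // Interactive options prompt for missing credentials.
    let options = HttpClientOptions::new_interactive(None, None);
    let client = HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?;
    // Upgrade the connection to the reader protocol for one snapshot.
    let reader = BackupReader::start(
        client, None, "store1", "host", "myhost",
        1626684618, // snapshot's epoch timestamp (placeholder)
        false,
    ).await?;
    let (manifest, _raw) = reader.download_manifest().await?;
    println!("snapshot has {} referenced files", manifest.files().len());
    Ok(())
}
```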


@@ -0,0 +1,101 @@
use std::convert::TryFrom;
use std::fmt;
use anyhow::{format_err, Error};
use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
/// Reference remote backup locations
///
#[derive(Debug)]
pub struct BackupRepository {
/// The user name used for Authentication
auth_id: Option<Authid>,
/// The host name or IP address
host: Option<String>,
/// The port
port: Option<u16>,
/// The name of the datastore
store: String,
}
impl BackupRepository {
pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
let host = match host {
Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
Some(format!("[{}]", host))
},
other => other,
};
Self { auth_id, host, port, store }
}
pub fn auth_id(&self) -> &Authid {
if let Some(ref auth_id) = self.auth_id {
return auth_id;
}
&Authid::root_auth_id()
}
pub fn user(&self) -> &Userid {
if let Some(auth_id) = &self.auth_id {
return auth_id.user();
}
Userid::root_userid()
}
pub fn host(&self) -> &str {
if let Some(ref host) = self.host {
return host;
}
"localhost"
}
pub fn port(&self) -> u16 {
if let Some(port) = self.port {
return port;
}
8007
}
pub fn store(&self) -> &str {
&self.store
}
}
impl fmt::Display for BackupRepository {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match (&self.auth_id, &self.host, self.port) {
(Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
(None, Some(host), None) => write!(f, "{}:{}", host, self.store),
(None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
(None, None, None) => write!(f, "{}", self.store),
}
}
}
impl std::str::FromStr for BackupRepository {
type Err = Error;
/// Parse a repository URL.
///
/// This parses strings like `user@host:datastore`. The `user` and
/// `host` parts are optional, where `host` defaults to the local
/// host, and `user` defaults to `root@pam`.
fn from_str(url: &str) -> Result<Self, Self::Err> {
let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
.ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
Ok(Self {
auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
host: cap.get(2).map(|m| m.as_str().to_owned()),
port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
store: cap[4].to_owned(),
})
}
}
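A short round-trip sketch of the parser above (host and store names are made up):

```rust
use std::str::FromStr;
use pbs_client::BackupRepository;

fn main() -> Result<(), anyhow::Error> {
    // Fully qualified form: <authid>@<host>:<store>; the port defaults to 8007.
    let repo = BackupRepository::from_str("admin@pbs@backup.example.org:store1")?;
    assert_eq!(repo.store(), "store1");
    assert_eq!(repo.host(), "backup.example.org");
    assert_eq!(repo.port(), 8007);
    // Display re-adds the defaulted port: admin@pbs@backup.example.org:8007:store1
    println!("{}", repo);

    // Minimal form: everything but the datastore is optional.
    let repo: BackupRepository = "store1".parse()?;
    assert_eq!(repo.host(), "localhost");
    Ok(())
}
```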


@@ -0,0 +1,39 @@
use anyhow::{bail, Error};
use proxmox::api::schema::*;
proxmox::const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<label>:<path>]).")
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
pub struct BackupSpecification {
pub archive_name: String, // left part
pub config_string: String, // right part
pub spec_type: BackupSpecificationType,
}
pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
let archive_name = caps.get(1).unwrap().as_str().into();
let extension = caps.get(2).unwrap().as_str();
let config_string = caps.get(3).unwrap().as_str().into();
let spec_type = match extension {
"pxar" => BackupSpecificationType::PXAR,
"img" => BackupSpecificationType::IMAGE,
"conf" => BackupSpecificationType::CONFIG,
"log" => BackupSpecificationType::LOGFILE,
_ => bail!("unknown backup source type '{}'", extension),
};
return Ok(BackupSpecification { archive_name, config_string, spec_type });
}
bail!("unable to parse backup source specification '{}'", value);
}
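As a quick sketch, the specification strings passed on the client command line decompose like this:

```rust
use pbs_client::{parse_backup_specification, BackupSpecificationType};

fn main() -> Result<(), anyhow::Error> {
    // "<label>.<extension>:<config>" - the extension selects the archive type.
    let spec = parse_backup_specification("root.pxar:/")?;
    assert_eq!(spec.archive_name, "root.pxar");
    assert_eq!(spec.config_string, "/");
    assert!(matches!(spec.spec_type, BackupSpecificationType::PXAR));

    // Extensions outside pxar/img/conf/log are rejected by the regex.
    assert!(parse_backup_specification("foo.tar:/").is_err());
    Ok(())
}
```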


@@ -0,0 +1,842 @@
use std::collections::HashSet;
use std::future::Future;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
use futures::stream::{Stream, StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
use proxmox::tools::digest_to_hex;
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1, CryptConfig};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_tools::format::HumanByte;
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use super::{H2Client, HttpClient};
pub struct BackupWriter {
h2: H2Client,
abort: AbortHandle,
verbose: bool,
crypt_config: Option<Arc<CryptConfig>>,
}
impl Drop for BackupWriter {
fn drop(&mut self) {
self.abort.abort();
}
}
pub struct BackupStats {
pub size: u64,
pub csum: [u8; 32],
}
/// Options for uploading blobs/streams to the server
#[derive(Default, Clone)]
pub struct UploadOptions {
pub previous_manifest: Option<Arc<BackupManifest>>,
pub compress: bool,
pub encrypt: bool,
pub fixed_size: Option<u64>,
}
struct UploadStats {
chunk_count: usize,
chunk_reused: usize,
size: usize,
size_reused: usize,
size_compressed: usize,
duration: std::time::Duration,
csum: [u8; 32],
}
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
impl BackupWriter {
fn new(
h2: H2Client,
abort: AbortHandle,
crypt_config: Option<Arc<CryptConfig>>,
verbose: bool,
) -> Arc<Self> {
Arc::new(Self {
h2,
abort,
crypt_config,
verbose,
})
}
// FIXME: extract into (flattened) parameter struct?
#[allow(clippy::too_many_arguments)]
pub async fn start(
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
backup_type: &str,
backup_id: &str,
backup_time: i64,
debug: bool,
benchmark: bool,
) -> Result<Arc<BackupWriter>, Error> {
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"backup-time": backup_time,
"store": datastore,
"debug": debug,
"benchmark": benchmark
});
let req = HttpClient::request_builder(
client.server(),
client.port(),
"GET",
"/api2/json/backup",
Some(param),
)
.unwrap();
let (h2, abort) = client
.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
.await?;
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
}
pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.get(path, param).await
}
pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.put(path, param).await
}
pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.post(path, param).await
}
pub async fn upload_post(
&self,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
self.h2
.upload("POST", path, param, content_type, data)
.await
}
pub async fn send_upload_request(
&self,
method: &str,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<h2::client::ResponseFuture, Error> {
let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap();
let response_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
Ok(response_future)
}
pub async fn upload_put(
&self,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
self.h2.upload("PUT", path, param, content_type, data).await
}
pub async fn finish(self: Arc<Self>) -> Result<(), Error> {
let h2 = self.h2.clone();
h2.post("finish", None)
.map_ok(move |_| {
self.abort.abort();
})
.await
}
pub fn cancel(&self) {
self.abort.abort();
}
pub async fn upload_blob<R: std::io::Read>(
&self,
mut reader: R,
file_name: &str,
) -> Result<BackupStats, Error> {
let mut raw_data = Vec::new();
// fixme: avoid loading into memory
reader.read_to_end(&mut raw_data)?;
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
let size = raw_data.len() as u64;
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_data(
&self,
data: Vec<u8>,
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let blob = match (options.encrypt, &self.crypt_config) {
(false, _) => DataBlob::encode(&data, None, options.compress)?,
(true, None) => bail!("requested encryption without a crypt config"),
(true, Some(crypt_config)) => {
DataBlob::encode(&data, Some(crypt_config), options.compress)?
}
};
let raw_data = blob.into_inner();
let size = raw_data.len() as u64;
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": size, "file-name": file_name });
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
&self,
src_path: P,
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let src_path = src_path.as_ref();
let mut file = tokio::fs::File::open(src_path)
.await
.map_err(|err| format_err!("unable to open file {:?} - {}", src_path, err))?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)
.await
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
self.upload_blob_from_data(contents, file_name, options)
.await
}
pub async fn upload_stream(
&self,
archive_name: &str,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let known_chunks = Arc::new(Mutex::new(HashSet::new()));
let mut param = json!({ "archive-name": archive_name });
let prefix = if let Some(size) = options.fixed_size {
param["size"] = size.into();
"fixed"
} else {
"dynamic"
};
if options.encrypt && self.crypt_config.is_none() {
bail!("requested encryption without a crypt config");
}
let index_path = format!("{}_index", prefix);
let close_path = format!("{}_close", prefix);
if let Some(manifest) = options.previous_manifest {
// try, but ignore errors
match ArchiveType::from_path(archive_name) {
Ok(ArchiveType::FixedIndex) => {
let _ = self
.download_previous_fixed_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
Ok(ArchiveType::DynamicIndex) => {
let _ = self
.download_previous_dynamic_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
_ => { /* do nothing */ }
}
}
let wid = self
.h2
.post(&index_path, Some(param))
.await?
.as_u64()
.unwrap();
let upload_stats = Self::upload_chunk_info_stream(
self.h2.clone(),
wid,
stream,
&prefix,
known_chunks.clone(),
if options.encrypt {
self.crypt_config.clone()
} else {
None
},
options.compress,
self.verbose,
)
.await?;
let size_dirty = upload_stats.size - upload_stats.size_reused;
let size: HumanByte = upload_stats.size.into();
let archive = if self.verbose {
archive_name.to_string()
} else {
pbs_tools::format::strip_server_file_extension(archive_name)
};
if archive_name != CATALOG_NAME {
let speed: HumanByte =
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
let size_dirty: HumanByte = size_dirty.into();
let size_compressed: HumanByte = upload_stats.size_compressed.into();
println!(
"{}: had to backup {} of {} (compressed {}) in {:.2}s",
archive,
size_dirty,
size,
size_compressed,
upload_stats.duration.as_secs_f64()
);
println!("{}: average backup speed: {}/s", archive, speed);
} else {
println!("Uploaded backup catalog ({})", size);
}
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
let reused: HumanByte = upload_stats.size_reused.into();
println!(
"{}: backup was done incrementally, reused {} ({:.1}%)",
archive, reused, reused_percent
);
}
if self.verbose && upload_stats.chunk_count > 0 {
println!(
"{}: Reused {} from {} chunks.",
archive, upload_stats.chunk_reused, upload_stats.chunk_count
);
println!(
"{}: Average chunk size was {}.",
archive,
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
);
println!(
"{}: Average time per request: {} microseconds.",
archive,
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
);
}
let param = json!({
"wid": wid ,
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
"csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(BackupStats {
size: upload_stats.size as u64,
csum: upload_stats.csum,
})
}
fn response_queue(
verbose: bool,
) -> (
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>,
) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
// FIXME: check if this works as expected as replacement for the combinator below?
// tokio::spawn(async move {
// let result: Result<(), Error> = (async move {
// while let Some(response) = verify_queue_rx.recv().await {
// match H2Client::h2api_response(response.await?).await {
// Ok(result) => println!("RESPONSE: {:?}", result),
// Err(err) => bail!("pipelined request failed: {}", err),
// }
// }
// Ok(())
// }).await;
// let _ignore_closed_channel = verify_result_tx.send(result);
// });
// old code for reference?
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.try_for_each(move |response: h2::client::ResponseFuture| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(move |result| {
if verbose {
println!("RESPONSE: {:?}", result)
}
})
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
let _ignore_closed_channel = verify_result_tx.send(result);
}),
);
(verify_queue_tx, verify_result_rx)
}
fn append_chunk_queue(
h2: H2Client,
wid: u64,
path: String,
verbose: bool,
) -> (UploadQueueSender, UploadResultReceiver) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
// FIXME: async-block-ify this code!
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
match (response, merged_chunk_info) {
(Some(response), MergedChunkInfo::Known(list)) => {
Either::Left(
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.and_then(move |_result| {
future::ok(MergedChunkInfo::Known(list))
})
)
}
(None, MergedChunkInfo::Known(list)) => {
Either::Right(future::ok(MergedChunkInfo::Known(list)))
}
_ => unreachable!(),
}
})
.merge_known_chunks()
.and_then(move |merged_chunk_info| {
match merged_chunk_info {
MergedChunkInfo::Known(chunk_list) => {
let mut digest_list = vec![];
let mut offset_list = vec![];
for (offset, digest) in chunk_list {
digest_list.push(digest_to_hex(&digest));
offset_list.push(offset);
}
if verbose { println!("append chunks list len ({})", digest_list.len()); }
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
let upload_data = Some(param_data);
h2.send_request(request, upload_data)
.and_then(move |response| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(|_| ())
})
.map_err(|err| format_err!("pipelined request failed: {}", err))
}
_ => unreachable!(),
}
})
.try_for_each(|_| future::ok(()))
.map(|result| {
let _ignore_closed_channel = verify_result_tx.send(result);
})
);
(verify_queue_tx, verify_result_rx)
}
pub async fn download_previous_fixed_index(
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = FixedIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read fixed index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
// add index chunks to known chunks
let mut known_chunks = known_chunks.lock().unwrap();
for i in 0..index.index_count() {
known_chunks.insert(*index.index_digest(i).unwrap());
}
if self.verbose {
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
}
pub async fn download_previous_dynamic_index(
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
// add index chunks to known chunks
let mut known_chunks = known_chunks.lock().unwrap();
for i in 0..index.index_count() {
known_chunks.insert(*index.index_digest(i).unwrap());
}
if self.verbose {
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
}
/// Retrieve backup time of last backup
pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
let data = self.h2.get("previous_backup_time", None).await?;
serde_json::from_value(data).map_err(|err| {
format_err!(
"Failed to parse backup time value returned by server - {}",
err
)
})
}
/// Download backup manifest (index.json) of last backup
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
self.h2
.download("previous", Some(param), &mut raw_data)
.await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
let manifest =
BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok(manifest)
}
// We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
// function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
// since this is a private method.
#[allow(clippy::too_many_arguments)]
fn upload_chunk_info_stream(
h2: H2Client,
wid: u64,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
prefix: &str,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
crypt_config: Option<Arc<CryptConfig>>,
compress: bool,
verbose: bool,
) -> impl Future<Output = Result<UploadStats, Error>> {
let total_chunks = Arc::new(AtomicUsize::new(0));
let total_chunks2 = total_chunks.clone();
let known_chunk_count = Arc::new(AtomicUsize::new(0));
let known_chunk_count2 = known_chunk_count.clone();
let stream_len = Arc::new(AtomicUsize::new(0));
let stream_len2 = stream_len.clone();
let compressed_stream_len = Arc::new(AtomicU64::new(0));
let compressed_stream_len2 = compressed_stream_len.clone();
let reused_len = Arc::new(AtomicUsize::new(0));
let reused_len2 = reused_len.clone();
let append_chunk_path = format!("{}_index", prefix);
let upload_chunk_path = format!("{}_chunk", prefix);
let is_fixed_chunk_size = prefix == "fixed";
let (upload_queue, upload_result) =
Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);
let start_time = std::time::Instant::now();
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
let index_csum_2 = index_csum.clone();
stream
.and_then(move |data| {
let chunk_len = data.len();
total_chunks.fetch_add(1, Ordering::SeqCst);
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
if let Some(ref crypt_config) = crypt_config {
chunk_builder = chunk_builder.crypt_config(crypt_config);
}
let mut known_chunks = known_chunks.lock().unwrap();
let digest = chunk_builder.digest();
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
let chunk_end = offset + chunk_len as u64;
if !is_fixed_chunk_size {
csum.update(&chunk_end.to_le_bytes());
}
csum.update(digest);
let chunk_is_known = known_chunks.contains(digest);
if chunk_is_known {
known_chunk_count.fetch_add(1, Ordering::SeqCst);
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
} else {
let compressed_stream_len2 = compressed_stream_len.clone();
known_chunks.insert(*digest);
future::ready(chunk_builder.build().map(move |(chunk, digest)| {
compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
MergedChunkInfo::New(ChunkInfo {
chunk,
digest,
chunk_len: chunk_len as u64,
offset,
})
}))
}
})
.merge_known_chunks()
.try_for_each(move |merged_chunk_info| {
let upload_queue = upload_queue.clone();
if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
let offset = chunk_info.offset;
let digest = chunk_info.digest;
let digest_str = digest_to_hex(&digest);
/* too verbose, needs finer verbosity setting granularity
if verbose {
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
chunk_info.chunk_len, offset);
}
*/
let chunk_data = chunk_info.chunk.into_inner();
let param = json!({
"wid": wid,
"digest": digest_str,
"size": chunk_info.chunk_len,
"encoded-size": chunk_data.len(),
});
let ct = "application/octet-stream";
let request = H2Client::request_builder(
"localhost",
"POST",
&upload_chunk_path,
Some(param),
Some(ct),
)
.unwrap();
let upload_data = Some(bytes::Bytes::from(chunk_data));
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
Either::Left(h2.send_request(request, upload_data).and_then(
move |response| async move {
upload_queue
.send((new_info, Some(response)))
.await
.map_err(|err| {
format_err!("failed to send to upload queue: {}", err)
})
},
))
} else {
Either::Right(async move {
upload_queue
.send((merged_chunk_info, None))
.await
.map_err(|err| format_err!("failed to send to upload queue: {}", err))
})
}
})
.then(move |result| async move { upload_result.await?.and(result) }.boxed())
.and_then(move |_| {
let duration = start_time.elapsed();
let chunk_count = total_chunks2.load(Ordering::SeqCst);
let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
let size = stream_len2.load(Ordering::SeqCst);
let size_reused = reused_len2.load(Ordering::SeqCst);
let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
let mut guard = index_csum_2.lock().unwrap();
let csum = guard.take().unwrap().finish();
futures::future::ok(UploadStats {
chunk_count,
chunk_reused,
size,
size_reused,
size_compressed,
duration,
csum,
})
})
}
/// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
let mut data = vec![];
// generate pseudo random byte sequence
for i in 0..1024 * 1024 {
for j in 0..4 {
let byte = ((i >> (j << 3)) & 0xff) as u8;
data.push(byte);
}
}
let item_len = data.len();
let mut repeat = 0;
let (upload_queue, upload_result) = Self::response_queue(verbose);
let start_time = std::time::Instant::now();
loop {
repeat += 1;
if start_time.elapsed().as_secs() >= 5 {
break;
}
if verbose {
eprintln!("send test data ({} bytes)", data.len());
}
let request =
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
upload_queue.send(request_future).await?;
}
drop(upload_queue); // close queue
let _ = upload_result.await?;
eprintln!(
"Uploaded {} chunks in {} seconds.",
repeat,
start_time.elapsed().as_secs()
);
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
eprintln!(
"Time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
Ok(speed)
}
}
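To show the writer half in context, a hedged sketch of one session that uploads a single blob (`epoch_i64` from the proxmox crate is assumed; store and file names are placeholders):

```rust
use anyhow::Error;
use pbs_client::{BackupWriter, HttpClient, UploadOptions};

async fn upload_note(client: HttpClient, data: Vec<u8>) -> Result<(), Error> {
    // Start a writer session for a new snapshot (no encryption, no benchmark).
    let writer = BackupWriter::start(
        client, None, "store1", "host", "myhost",
        proxmox::tools::time::epoch_i64(), false, false,
    ).await?;
    let options = UploadOptions { compress: true, ..UploadOptions::default() };
    let stats = writer.upload_blob_from_data(data, "notes.blob", options).await?;
    println!("uploaded {} bytes", stats.size);
    // Tell the server the backup is complete and tear down the h2 connection.
    writer.finish().await
}
```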

File diff suppressed because it is too large

File diff suppressed because it is too large

pbs-client/src/lib.rs (new file)

@@ -0,0 +1,70 @@
//! Client side interface to the proxmox backup server
//!
//! This library implements the client side to access the backups
//! server using https.
use anyhow::Error;
use pbs_api_types::{Authid, Userid};
use pbs_tools::ticket::Ticket;
use pbs_tools::cert::CertInfo;
use pbs_tools::auth::private_auth_key;
pub mod catalog_shell;
pub mod pxar;
pub mod tools;
mod merge_known_chunks;
pub mod pipe_to_stream;
mod http_client;
pub use http_client::*;
mod vsock_client;
pub use vsock_client::*;
mod task_log;
pub use task_log::*;
mod backup_reader;
pub use backup_reader::*;
mod backup_writer;
pub use backup_writer::*;
mod remote_chunk_reader;
pub use remote_chunk_reader::*;
mod pxar_backup_stream;
pub use pxar_backup_stream::*;
mod backup_repo;
pub use backup_repo::*;
mod backup_specification;
pub use backup_specification::*;
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
/// Connect to localhost:8007 as root@pam
///
/// This automatically creates a ticket if run as 'root' user.
pub fn connect_to_localhost() -> Result<HttpClient, Error> {
let uid = nix::unistd::Uid::current();
let client = if uid.is_root() {
let ticket = Ticket::new("PBS", Userid::root_userid())?
.sign(private_auth_key(), None)?;
let fingerprint = CertInfo::new()?.fingerprint()?;
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
} else {
let options = HttpClientOptions::new_interactive(None, None);
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
};
Ok(client)
}
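A short usage sketch for the helper above (the plain `get` helper on HttpClient is assumed here, since its diff is suppressed):

```rust
use anyhow::Error;
use pbs_client::connect_to_localhost;

async fn print_datastores() -> Result<(), Error> {
    let client = connect_to_localhost()?;
    // Assumed REST helper: GET against the local API.
    let result = client.get("api2/json/admin/datastore", None).await?;
    println!("{}", result);
    Ok(())
}
```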


@@ -0,0 +1,97 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::Error;
use futures::{ready, Stream};
use pin_project::pin_project;
use pbs_datastore::data_blob::ChunkInfo;
pub enum MergedChunkInfo {
Known(Vec<(u64, [u8; 32])>),
New(ChunkInfo),
}
pub trait MergeKnownChunks: Sized {
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
}
#[pin_project]
pub struct MergeKnownChunksQueue<S> {
#[pin]
input: S,
buffer: Option<MergedChunkInfo>,
}
impl<S> MergeKnownChunks for S
where
S: Stream<Item = Result<MergedChunkInfo, Error>>,
{
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self> {
MergeKnownChunksQueue {
input: self,
buffer: None,
}
}
}
impl<S> Stream for MergeKnownChunksQueue<S>
where
S: Stream<Item = Result<MergedChunkInfo, Error>>,
{
type Item = Result<MergedChunkInfo, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let mut this = self.project();
loop {
match ready!(this.input.as_mut().poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
None => {
if let Some(last) = this.buffer.take() {
return Poll::Ready(Some(Ok(last)));
} else {
return Poll::Ready(None);
}
}
Some(Ok(merged_chunk_info)) => {
match merged_chunk_info {
MergedChunkInfo::Known(list) => {
let last = this.buffer.take();
match last {
None => {
*this.buffer = Some(MergedChunkInfo::Known(list));
// continue
}
Some(MergedChunkInfo::Known(mut last_list)) => {
last_list.extend_from_slice(&list);
let len = last_list.len();
*this.buffer = Some(MergedChunkInfo::Known(last_list));
if len >= 64 {
return Poll::Ready(this.buffer.take().map(Ok));
}
// continue
}
Some(MergedChunkInfo::New(_)) => {
*this.buffer = Some(MergedChunkInfo::Known(list));
return Poll::Ready(last.map(Ok));
}
}
}
MergedChunkInfo::New(chunk_info) => {
let new = MergedChunkInfo::New(chunk_info);
if let Some(last) = this.buffer.take() {
*this.buffer = Some(new);
return Poll::Ready(Some(Ok(last)));
} else {
return Poll::Ready(Some(Ok(new)));
}
}
}
}
}
}
}
}
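The batching semantics can be pinned down with a small in-crate test sketch (the module is private, so this would live next to the code above; digests are dummies):

```rust
#[cfg(test)]
mod tests {
    use futures::executor::block_on;
    use futures::stream::{self, StreamExt};

    use super::{MergeKnownChunks, MergedChunkInfo};

    #[test]
    fn known_chunks_are_batched() {
        let digest = [0u8; 32];
        let input = stream::iter(vec![
            Ok::<_, anyhow::Error>(MergedChunkInfo::Known(vec![(0, digest)])),
            Ok(MergedChunkInfo::Known(vec![(4096, digest)])),
        ]);
        let out: Vec<_> = block_on(input.merge_known_chunks().collect());
        // Consecutive Known lists merge into one item; the buffer flushes at
        // stream end, once 64 offsets accumulate, or when a New chunk arrives.
        assert_eq!(out.len(), 1);
        match out.into_iter().next().unwrap().unwrap() {
            MergedChunkInfo::Known(list) => assert_eq!(list.len(), 2),
            _ => panic!("expected a merged Known list"),
        }
    }
}
```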


@@ -0,0 +1,70 @@
// Implement simple flow control for h2 client
//
// See also: hyper/src/proto/h2/mod.rs
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use bytes::Bytes;
use futures::{ready, Future};
use h2::SendStream;
pub struct PipeToSendStream {
body_tx: SendStream<Bytes>,
data: Option<Bytes>,
}
impl PipeToSendStream {
pub fn new(data: Bytes, tx: SendStream<Bytes>) -> PipeToSendStream {
PipeToSendStream {
body_tx: tx,
data: Some(data),
}
}
}
impl Future for PipeToSendStream {
type Output = Result<(), Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.get_mut();
if this.data != None {
// just reserve 1 byte to make sure there's some
// capacity available. h2 will handle the capacity
// management for the actual body chunk.
this.body_tx.reserve_capacity(1);
if this.body_tx.capacity() == 0 {
loop {
match ready!(this.body_tx.poll_capacity(cx)) {
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
Some(Ok(0)) => {}
Some(Ok(_)) => break,
None => return Poll::Ready(Err(format_err!("protocol canceled"))),
}
}
} else if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
return Poll::Ready(Err(match reset {
Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
Err(err) => Error::from(err),
}));
}
this.body_tx
.send_data(this.data.take().unwrap(), true)
.map_err(Error::from)?;
Poll::Ready(Ok(()))
} else {
if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
return Poll::Ready(Err(match reset {
Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
Err(err) => Error::from(err),
}));
}
Poll::Ready(Ok(()))
}
}
}

File diff suppressed because it is too large


@@ -0,0 +1,162 @@
use std::ffi::OsString;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::{mkdirat, Mode};
use proxmox::sys::error::SysError;
use proxmox::tools::fd::BorrowedFd;
use pxar::Metadata;
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
pub struct PxarDir {
file_name: OsString,
metadata: Metadata,
dir: Option<Dir>,
}
impl PxarDir {
pub fn new(file_name: OsString, metadata: Metadata) -> Self {
Self {
file_name,
metadata,
dir: None,
}
}
pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
Self {
file_name: OsString::from("."),
metadata,
dir: Some(dir),
}
}
fn create_dir(
&mut self,
parent: RawFd,
allow_existing_dirs: bool,
) -> Result<BorrowedFd, Error> {
match mkdirat(
parent,
self.file_name.as_os_str(),
perms_from_metadata(&self.metadata)?,
) {
Ok(()) => (),
Err(err) => {
if !(allow_existing_dirs && err.already_exists()) {
return Err(err.into());
}
}
}
self.open_dir(parent)
}
fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
let dir = Dir::openat(
parent,
self.file_name.as_os_str(),
OFlag::O_DIRECTORY,
Mode::empty(),
)?;
let fd = BorrowedFd::new(&dir);
self.dir = Some(dir);
Ok(fd)
}
pub fn try_as_borrowed_fd(&self) -> Option<BorrowedFd> {
self.dir.as_ref().map(BorrowedFd::new)
}
pub fn metadata(&self) -> &Metadata {
&self.metadata
}
}
pub struct PxarDirStack {
dirs: Vec<PxarDir>,
path: PathBuf,
created: usize,
}
impl PxarDirStack {
pub fn new(root: Dir, metadata: Metadata) -> Self {
Self {
dirs: vec![PxarDir::with_dir(root, metadata)],
path: PathBuf::from("/"),
created: 1, // the root directory exists
}
}
pub fn is_empty(&self) -> bool {
self.dirs.is_empty()
}
pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
assert_single_path_component(&file_name)?;
self.path.push(&file_name);
self.dirs.push(PxarDir::new(file_name, metadata));
Ok(())
}
pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
let out = self.dirs.pop();
if !self.path.pop() {
if self.path.as_os_str() == "/" {
// we just finished the root directory, make sure this can only happen once:
self.path = PathBuf::new();
} else {
bail!("lost track of path");
}
}
self.created = self.created.min(self.dirs.len());
Ok(out)
}
pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<BorrowedFd, Error> {
// should not be possible given the way we use it:
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
let dirs_len = self.dirs.len();
let mut fd = self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))?
.as_raw_fd();
while self.created < dirs_len {
fd = self.dirs[self.created]
.create_dir(fd, allow_existing_dirs)?
.as_raw_fd();
self.created += 1;
}
self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
}
pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
let _: BorrowedFd = self.last_dir_fd(allow_existing_dirs)?;
Ok(())
}
pub fn root_dir_fd(&self) -> Result<BorrowedFd, Error> {
// should not be possible given the way we use it:
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
self.dirs[0]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
}
pub fn path(&self) -> &Path {
&self.path
}
}


@@ -0,0 +1,864 @@
//! Code for extraction of pxar contents onto the file system.
use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr, OsString};
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::pin::Pin;
use futures::future::Future;
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pathpatterns::{MatchEntry, MatchList, MatchType};
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::decoder::aio::Decoder;
use pxar::format::Device;
use pxar::{Entry, EntryKind, Metadata};
use proxmox::c_result;
use proxmox::tools::{
fs::{create_path, CreateOptions},
io::{sparse_copy, sparse_copy_async},
};
use pbs_tools::zip::{ZipEncoder, ZipEntry};
use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::metadata;
use crate::pxar::Flags;
pub struct PxarExtractOptions<'a> {
pub match_list: &'a[MatchEntry],
pub extract_match_default: bool,
pub allow_existing_dirs: bool,
pub on_error: Option<ErrorHandler>,
}
pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
pub fn extract_archive<T, F>(
mut decoder: pxar::decoder::Decoder<T>,
destination: &Path,
feature_flags: Flags,
mut callback: F,
options: PxarExtractOptions,
) -> Result<(), Error>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
// we use this to keep track of our directory-traversal
decoder.enable_goodbye_entries(true);
let root = decoder
.next()
.ok_or_else(|| format_err!("found empty pxar archive"))?
.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
if !root.is_dir() {
bail!("pxar archive does not start with a directory entry!");
}
create_path(
&destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
let dir = Dir::open(
destination,
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
let mut extractor = Extractor::new(
dir,
root.metadata().clone(),
options.allow_existing_dirs,
feature_flags,
);
if let Some(on_error) = options.on_error {
extractor.on_error(on_error);
}
let mut match_stack = Vec::new();
let mut err_path_stack = vec![OsString::from("/")];
let mut current_match = options.extract_match_default;
while let Some(entry) = decoder.next() {
let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
let file_name_os = entry.file_name();
// safety check: a file entry in an archive must never contain slashes:
if file_name_os.as_bytes().contains(&b'/') {
bail!("archive file entry contains slashes, which is invalid and a security concern");
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
let metadata = entry.metadata();
extractor.set_path(entry.path().as_os_str().to_owned());
let match_result = options.match_list.matches(
entry.path().as_os_str().as_bytes(),
Some(metadata.file_type() as u32),
);
let did_match = match match_result {
Some(MatchType::Include) => true,
Some(MatchType::Exclude) => false,
None => current_match,
};
match (did_match, entry.kind()) {
(_, EntryKind::Directory) => {
callback(entry.path());
let create = current_match && match_result != Some(MatchType::Exclude);
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), create)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
// We're starting a new directory, push our old matching state and replace it with
// our new one:
match_stack.push(current_match);
current_match = did_match;
// When we hit the goodbye table we'll try to apply metadata to the directory, but
// the Goodbye entry will not contain the path, so push it to our path stack for
// error messages:
err_path_stack.push(extractor.clone_path());
Ok(())
}
(_, EntryKind::GoodbyeTable) => {
// go up a directory
extractor.set_path(err_path_stack.pop().ok_or_else(|| {
format_err!(
"error at entry {:?}: unexpected end of directory",
file_name_os
)
})?);
extractor
.leave_directory()
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
// We left a directory, also get back our previous matching state. This is in sync
// with `dir_stack` so this should never be empty except for the final goodbye
// table, in which case we get back to the default of `true`.
current_match = match_stack.pop().unwrap_or(true);
Ok(())
}
(true, EntryKind::Symlink(link)) => {
callback(entry.path());
extractor.extract_symlink(&file_name, metadata, link.as_ref())
}
(true, EntryKind::Hardlink(link)) => {
callback(entry.path());
extractor.extract_hardlink(&file_name, link.as_os_str())
}
(true, EntryKind::Device(dev)) => {
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
callback(entry.path());
extractor.extract_device(&file_name, metadata, dev)
} else {
Ok(())
}
}
(true, EntryKind::Fifo) => {
if extractor.contains_flags(Flags::WITH_FIFOS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
} else {
Ok(())
}
}
(true, EntryKind::Socket) => {
if extractor.contains_flags(Flags::WITH_SOCKETS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
} else {
Ok(())
}
}
(true, EntryKind::File { size, .. }) => extractor.extract_file(
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
),
(false, _) => Ok(()), // skip this
}
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
}
if !extractor.dir_stack.is_empty() {
bail!("unexpected eof while decoding pxar archive");
}
Ok(())
}
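For orientation, a hedged sketch of driving `extract_archive` from a blocking reader (assumes pxar's `Decoder::from_std` constructor and that this module re-exports the types below):

```rust
use std::path::Path;

use pbs_client::pxar::{extract_archive, Flags, PxarExtractOptions};

fn restore(archive: &Path, target: &Path) -> Result<(), anyhow::Error> {
    let reader = std::io::BufReader::new(std::fs::File::open(archive)?);
    // Assumption: pxar exposes a std::io::Read based decoder constructor.
    let decoder = pxar::decoder::Decoder::from_std(reader)?;
    let options = PxarExtractOptions {
        match_list: &[],             // no include/exclude patterns
        extract_match_default: true, // extract everything by default
        allow_existing_dirs: false,
        on_error: None,              // the first error aborts the restore
    };
    extract_archive(
        decoder,
        target,
        Flags::DEFAULT,
        |path| println!("extracting {:?}", path),
        options,
    )
}
```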
/// Common state for file extraction.
pub struct Extractor {
feature_flags: Flags,
allow_existing_dirs: bool,
dir_stack: PxarDirStack,
/// For better error output we need to track the current path in the Extractor state.
current_path: Arc<Mutex<OsString>>,
/// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
/// continue extracting or the passed error as `Err` to bail out.
on_error: ErrorHandler,
}
impl Extractor {
/// Create a new extractor state for a target directory.
pub fn new(
root_dir: Dir,
metadata: Metadata,
allow_existing_dirs: bool,
feature_flags: Flags,
) -> Self {
Self {
dir_stack: PxarDirStack::new(root_dir, metadata),
allow_existing_dirs,
feature_flags,
current_path: Arc::new(Mutex::new(OsString::new())),
on_error: Box::new(Err),
}
}
/// We call this on errors. The error will be reformatted to include `current_path`. The
/// callback should decide whether this error was fatal (simply return it) to bail out early,
/// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
/// extracting.
pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
let path = Arc::clone(&self.current_path);
self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
});
}
pub fn set_path(&mut self, path: OsString) {
*self.current_path.lock().unwrap() = path;
}
pub fn clone_path(&self) -> OsString {
self.current_path.lock().unwrap().clone()
}
/// When encountering a directory during extraction, this is used to keep track of it. If
/// `create` is true it is immediately created and its metadata will be updated once we leave
/// it. If `create` is false it will only be created if it is going to have any actual content.
pub fn enter_directory(
&mut self,
file_name: OsString,
metadata: Metadata,
create: bool,
) -> Result<(), Error> {
self.dir_stack.push(file_name, metadata)?;
if create {
self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
}
Ok(())
}
/// When done with a directory we can apply its metadata if it has been created.
pub fn leave_directory(&mut self) -> Result<(), Error> {
let path_info = self.dir_stack.path().to_owned();
let dir = self
.dir_stack
.pop()
.map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
.ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
if let Some(fd) = dir.try_as_borrowed_fd() {
metadata::apply(
self.feature_flags,
dir.metadata(),
fd.as_raw_fd(),
&path_info,
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
}
Ok(())
}
fn contains_flags(&self, flag: Flags) -> bool {
self.feature_flags.contains(flag)
}
fn parent_fd(&mut self) -> Result<RawFd, Error> {
self.dir_stack
.last_dir_fd(self.allow_existing_dirs)
.map(|d| d.as_raw_fd())
.map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
}
pub fn extract_symlink(
&mut self,
file_name: &CStr,
metadata: &Metadata,
link: &OsStr,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
nix::unistd::symlinkat(link, Some(parent), file_name)?;
metadata::apply_at(
self.feature_flags,
metadata,
parent,
file_name,
self.dir_stack.path(),
&mut self.on_error,
)
}
pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
crate::pxar::tools::assert_relative_path(link)?;
let parent = self.parent_fd()?;
let root = self.dir_stack.root_dir_fd()?;
let target = CString::new(link.as_bytes())?;
nix::unistd::linkat(
Some(root.as_raw_fd()),
target.as_c_str(),
Some(parent),
file_name,
nix::unistd::LinkatFlags::NoSymlinkFollow,
)?;
Ok(())
}
pub fn extract_device(
&mut self,
file_name: &CStr,
metadata: &Metadata,
device: &Device,
) -> Result<(), Error> {
self.extract_special(file_name, metadata, device.to_dev_t())
}
pub fn extract_special(
&mut self,
file_name: &CStr,
metadata: &Metadata,
device: libc::dev_t,
) -> Result<(), Error> {
let mode = metadata.stat.mode;
let mode = u32::try_from(mode).map_err(|_| {
format_err!(
"device node's mode contains illegal bits: 0x{:x} (0o{:o})",
mode,
mode,
)
})?;
let parent = self.parent_fd()?;
unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
.map_err(|err| format_err!("failed to create device node: {}", err))?;
metadata::apply_at(
self.feature_flags,
metadata,
parent,
file_name,
self.dir_stack.path(),
&mut self.on_error,
)
}
pub fn extract_file(
&mut self,
file_name: &CStr,
metadata: &Metadata,
size: u64,
contents: &mut dyn io::Read,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
let mut file = unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(
parent,
file_name,
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
Mode::from_bits(0o600).unwrap(),
)
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
)
};
metadata::apply_initial_flags(
self.feature_flags,
metadata,
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
let result = sparse_copy(&mut *contents, &mut file)
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
if size != result.written {
bail!(
"extracted {} bytes of a file of {} bytes",
result.written,
size
);
}
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
metadata::apply(
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&mut self.on_error,
)
}
pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
&mut self,
file_name: &CStr,
metadata: &Metadata,
size: u64,
contents: &mut T,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
let mut file = tokio::fs::File::from_std(unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(
parent,
file_name,
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
Mode::from_bits(0o600).unwrap(),
)
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
)
});
metadata::apply_initial_flags(
self.feature_flags,
metadata,
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
let result = sparse_copy_async(&mut *contents, &mut file)
.await
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
if size != result.written {
bail!(
"extracted {} bytes of a file of {} bytes",
result.written,
size
);
}
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
metadata::apply(
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&mut self.on_error,
)
}
}
pub async fn create_zip<T, W, P>(
output: W,
decoder: Accessor<T>,
path: P,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
P: AsRef<Path>,
{
let root = decoder.open_root().await?;
let file = root
.lookup(&path).await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
let mut prefix = PathBuf::new();
let mut components = file.entry().path().components();
components.next_back(); // discard the last component
for comp in components {
prefix.push(comp);
}
let mut zipencoder = ZipEncoder::new(output);
let mut decoder = decoder;
recurse_files_zip(&mut zipencoder, &mut decoder, &prefix, file, verbose)
.await
.map_err(|err| {
eprintln!("error during creating of zip: {}", err);
err
})?;
zipencoder
.finish()
.await
.map_err(|err| {
eprintln!("error during finishing of zip: {}", err);
err
})
}
fn recurse_files_zip<'a, T, W>(
zip: &'a mut ZipEncoder<W>,
decoder: &'a mut Accessor<T>,
prefix: &'a Path,
file: FileEntry<T>,
verbose: bool,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
Box::pin(async move {
let metadata = file.entry().metadata();
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
match file.kind() {
EntryKind::File { .. } => {
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(file.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Hardlink(_) => {
let realfile = decoder.follow_hardlink(&file).await?;
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(realfile.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Directory => {
let dir = file.enter_directory().await?;
let mut readdir = dir.read_dir();
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
false,
);
zip.add_entry::<FileContents<T>>(entry, None).await?;
while let Some(entry) = readdir.next().await {
let entry = entry?.decode_entry().await?;
recurse_files_zip(zip, decoder, prefix, entry, verbose).await?;
}
}
_ => {} // ignore all else
};
Ok(())
})
}
fn get_extractor<DEST>(destination: DEST, metadata: Metadata) -> Result<Extractor, Error>
where
DEST: AsRef<Path>,
{
create_path(
&destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| {
format_err!(
"error creating directory {:?}: {}",
destination.as_ref(),
err
)
})?;
let dir = Dir::open(
destination.as_ref(),
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| {
format_err!(
"unable to open target directory {:?}: {}",
destination.as_ref(),
err,
)
})?;
Ok(Extractor::new(dir, metadata, false, Flags::DEFAULT))
}
pub async fn extract_sub_dir<T, DEST, PATH>(
destination: DEST,
decoder: Accessor<T>,
path: PATH,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
DEST: AsRef<Path>,
PATH: AsRef<Path>,
{
let root = decoder.open_root().await?;
let mut extractor = get_extractor(
destination,
root.lookup_self().await?.entry().metadata().clone(),
)?;
let file = root
.lookup(&path)
.await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
recurse_files_extractor(&mut extractor, file, verbose).await
}
pub async fn extract_sub_dir_seq<S, DEST>(
destination: DEST,
mut decoder: Decoder<S>,
verbose: bool,
) -> Result<(), Error>
where
S: pxar::decoder::SeqRead + Unpin + Send + 'static,
DEST: AsRef<Path>,
{
decoder.enable_goodbye_entries(true);
let root = match decoder.next().await {
Some(Ok(root)) => root,
Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
None => bail!("cannot extract empty archive"),
};
let mut extractor = get_extractor(destination, root.metadata().clone())?;
if let Err(err) = seq_files_extractor(&mut extractor, decoder, verbose).await {
eprintln!("error extracting pxar archive: {}", err);
}
Ok(())
}
fn extract_special(
extractor: &mut Extractor,
entry: &Entry,
file_name: &CStr,
) -> Result<(), Error> {
let metadata = entry.metadata();
match entry.kind() {
EntryKind::Symlink(link) => {
extractor.extract_symlink(file_name, metadata, link.as_ref())?;
}
EntryKind::Hardlink(link) => {
extractor.extract_hardlink(file_name, link.as_os_str())?;
}
EntryKind::Device(dev) => {
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
extractor.extract_device(file_name, metadata, dev)?;
}
}
EntryKind::Fifo => {
if extractor.contains_flags(Flags::WITH_FIFOS) {
extractor.extract_special(file_name, metadata, 0)?;
}
}
EntryKind::Socket => {
if extractor.contains_flags(Flags::WITH_SOCKETS) {
extractor.extract_special(file_name, metadata, 0)?;
}
}
_ => bail!("extract_special used with unsupported entry kind"),
}
Ok(())
}
fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
let file_name_os = entry.file_name().to_owned();
// safety check: a file entry in an archive must never contain slashes:
if file_name_os.as_bytes().contains(&b'/') {
bail!("archive file entry contains slashes, which is invalid and a security concern");
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
Ok((file_name_os, file_name))
}
async fn recurse_files_extractor<'a, T>(
extractor: &'a mut Extractor,
file: FileEntry<T>,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
{
let entry = file.entry();
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(entry)?;
if verbose {
eprintln!("extracting: {}", file.path().display());
}
match file.kind() {
EntryKind::Directory => {
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
let dir = file.enter_directory().await?;
let mut seq_decoder = dir.decode_full().await?;
seq_decoder.enable_goodbye_entries(true);
seq_files_extractor(extractor, seq_decoder, verbose).await?;
extractor.leave_directory()?;
}
EntryKind::File { size, .. } => {
extractor
.async_extract_file(
&file_name,
metadata,
*size,
&mut file.contents().await.map_err(|_| {
format_err!("found regular file entry without contents in archive")
})?,
)
.await?
}
EntryKind::GoodbyeTable => {} // ignore
_ => extract_special(extractor, entry, &file_name)?,
}
Ok(())
}
async fn seq_files_extractor<'a, T>(
extractor: &'a mut Extractor,
mut decoder: pxar::decoder::aio::Decoder<T>,
verbose: bool,
) -> Result<(), Error>
where
T: pxar::decoder::SeqRead,
{
let mut dir_level = 0;
loop {
let entry = match decoder.next().await {
Some(entry) => entry?,
None => return Ok(()),
};
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(&entry)?;
if verbose && !matches!(entry.kind(), EntryKind::GoodbyeTable) {
eprintln!("extracting: {}", entry.path().display());
}
if let Err(err) = async {
match entry.kind() {
EntryKind::Directory => {
dir_level += 1;
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
}
EntryKind::File { size, .. } => {
extractor
.async_extract_file(
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
)
.await?
}
EntryKind::GoodbyeTable => {
dir_level -= 1;
extractor.leave_directory()?;
}
_ => extract_special(extractor, &entry, &file_name)?,
}
Ok(()) as Result<(), Error>
}
.await
{
let display = entry.path().display().to_string();
eprintln!(
"error extracting {}: {}",
if matches!(entry.kind(), EntryKind::GoodbyeTable) {
"<directory>"
} else {
&display
},
err
);
}
if dir_level < 0 {
// we've encountered one more Goodbye than Directory, meaning we've left the dir we
// started in - exit early, otherwise the extractor might panic
return Ok(());
}
}
}

pbs-client/src/pxar/flags.rs Normal file

@ -0,0 +1,378 @@
//! Feature flags for *pxar* allow controlling what is stored in, and restored from,
//! the archive.
//! Flags for the known supported features of a given filesystem can be derived
//! from the superblock's magic number.
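//!
//! A minimal sketch of the intended use (names as defined in this module; obtaining
//! the `statfs` magic number is assumed to happen elsewhere):
//!
//! ```ignore
//! // derive the supported feature set from the filesystem's magic number
//! let fs_feature_flags = Flags::from_magic(magic);
//! // only attempt the xattr syscalls if the filesystem can support them
//! if fs_feature_flags.contains(Flags::WITH_XATTRS) {
//!     // read/apply extended attributes ...
//! }
//! ```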
use libc::c_long;
use bitflags::bitflags;
bitflags! {
pub struct Flags: u64 {
/// FAT-style 2s time granularity
const WITH_2SEC_TIME = 0x40;
/// Preserve read only flag of files
const WITH_READ_ONLY = 0x80;
/// Preserve unix permissions
const WITH_PERMISSIONS = 0x100;
/// Include symbolic links
const WITH_SYMLINKS = 0x200;
/// Include device nodes
const WITH_DEVICE_NODES = 0x400;
/// Include FIFOs
const WITH_FIFOS = 0x800;
/// Include Sockets
const WITH_SOCKETS = 0x1000;
/// Preserve DOS file flag `HIDDEN`
const WITH_FLAG_HIDDEN = 0x2000;
/// Preserve DOS file flag `SYSTEM`
const WITH_FLAG_SYSTEM = 0x4000;
/// Preserve DOS file flag `ARCHIVE`
const WITH_FLAG_ARCHIVE = 0x8000;
// chattr() flags
/// Linux file attribute `APPEND`
const WITH_FLAG_APPEND = 0x10000;
/// Linux file attribute `NOATIME`
const WITH_FLAG_NOATIME = 0x20000;
/// Linux file attribute `COMPR`
const WITH_FLAG_COMPR = 0x40000;
/// Linux file attribute `NOCOW`
const WITH_FLAG_NOCOW = 0x80000;
/// Linux file attribute `NODUMP`
const WITH_FLAG_NODUMP = 0x0010_0000;
/// Linux file attribute `DIRSYNC`
const WITH_FLAG_DIRSYNC = 0x0020_0000;
/// Linux file attribute `IMMUTABLE`
const WITH_FLAG_IMMUTABLE = 0x0040_0000;
/// Linux file attribute `SYNC`
const WITH_FLAG_SYNC = 0x0080_0000;
/// Linux file attribute `NOCOMP`
const WITH_FLAG_NOCOMP = 0x0100_0000;
/// Linux file attribute `PROJINHERIT`
const WITH_FLAG_PROJINHERIT = 0x0200_0000;
/// Preserve BTRFS subvolume flag
const WITH_SUBVOLUME = 0x0400_0000;
/// Preserve BTRFS read-only subvolume flag
const WITH_SUBVOLUME_RO = 0x0800_0000;
/// Preserve Extended Attribute metadata
const WITH_XATTRS = 0x1000_0000;
/// Preserve Access Control List metadata
const WITH_ACL = 0x2000_0000;
/// Preserve SELinux security context
const WITH_SELINUX = 0x4000_0000;
/// Preserve "security.capability" xattr
const WITH_FCAPS = 0x8000_0000;
/// Preserve XFS/ext4/ZFS project quota ID
const WITH_QUOTA_PROJID = 0x0001_0000_0000;
/// Support ".pxarexclude" files
const EXCLUDE_FILE = 0x1000_0000_0000_0000;
/// Exclude submounts
const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
/// Exclude entries with chattr flag NODUMP
const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;
// Definitions of typical feature flag sets for the *pxar* encoder/decoder.
// This way, expensive syscalls for unsupported features can be avoided.
/// All chattr file attributes
const WITH_CHATTR =
Flags::WITH_FLAG_APPEND.bits() |
Flags::WITH_FLAG_NOATIME.bits() |
Flags::WITH_FLAG_COMPR.bits() |
Flags::WITH_FLAG_NOCOW.bits() |
Flags::WITH_FLAG_NODUMP.bits() |
Flags::WITH_FLAG_DIRSYNC.bits() |
Flags::WITH_FLAG_IMMUTABLE.bits() |
Flags::WITH_FLAG_SYNC.bits() |
Flags::WITH_FLAG_NOCOMP.bits() |
Flags::WITH_FLAG_PROJINHERIT.bits();
/// All FAT file attributes
const WITH_FAT_ATTRS =
Flags::WITH_FLAG_HIDDEN.bits() |
Flags::WITH_FLAG_SYSTEM.bits() |
Flags::WITH_FLAG_ARCHIVE.bits();
/// All bits that may also be exposed via fuse
const WITH_FUSE =
Flags::WITH_2SEC_TIME.bits() |
Flags::WITH_READ_ONLY.bits() |
Flags::WITH_PERMISSIONS.bits() |
Flags::WITH_SYMLINKS.bits() |
Flags::WITH_DEVICE_NODES.bits() |
Flags::WITH_FIFOS.bits() |
Flags::WITH_SOCKETS.bits() |
Flags::WITH_FAT_ATTRS.bits() |
Flags::WITH_CHATTR.bits() |
Flags::WITH_XATTRS.bits();
/// Default feature flags for encoder/decoder
const DEFAULT =
Flags::WITH_SYMLINKS.bits() |
Flags::WITH_DEVICE_NODES.bits() |
Flags::WITH_FIFOS.bits() |
Flags::WITH_SOCKETS.bits() |
Flags::WITH_FLAG_HIDDEN.bits() |
Flags::WITH_FLAG_SYSTEM.bits() |
Flags::WITH_FLAG_ARCHIVE.bits() |
Flags::WITH_FLAG_APPEND.bits() |
Flags::WITH_FLAG_NOATIME.bits() |
Flags::WITH_FLAG_COMPR.bits() |
Flags::WITH_FLAG_NOCOW.bits() |
//WITH_FLAG_NODUMP.bits() |
Flags::WITH_FLAG_DIRSYNC.bits() |
Flags::WITH_FLAG_IMMUTABLE.bits() |
Flags::WITH_FLAG_SYNC.bits() |
Flags::WITH_FLAG_NOCOMP.bits() |
Flags::WITH_FLAG_PROJINHERIT.bits() |
Flags::WITH_SUBVOLUME.bits() |
Flags::WITH_SUBVOLUME_RO.bits() |
Flags::WITH_XATTRS.bits() |
Flags::WITH_ACL.bits() |
Flags::WITH_SELINUX.bits() |
Flags::WITH_FCAPS.bits() |
Flags::WITH_QUOTA_PROJID.bits() |
Flags::EXCLUDE_NODUMP.bits() |
Flags::EXCLUDE_FILE.bits();
}
}
impl Default for Flags {
fn default() -> Flags {
Flags::DEFAULT
}
}
// from /usr/include/linux/fs.h
const FS_APPEND_FL: c_long = 0x0000_0020;
const FS_NOATIME_FL: c_long = 0x0000_0080;
const FS_COMPR_FL: c_long = 0x0000_0004;
const FS_NOCOW_FL: c_long = 0x0080_0000;
const FS_NODUMP_FL: c_long = 0x0000_0040;
const FS_DIRSYNC_FL: c_long = 0x0001_0000;
const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
const FS_SYNC_FL: c_long = 0x0000_0008;
const FS_NOCOMP_FL: c_long = 0x0000_0400;
const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
pub(crate) const INITIAL_FS_FLAGS: c_long =
FS_NOATIME_FL
| FS_COMPR_FL
| FS_NOCOW_FL
| FS_NOCOMP_FL
| FS_PROJINHERIT_FL;
#[rustfmt::skip]
const CHATTR_MAP: [(Flags, c_long); 10] = [
( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
( Flags::WITH_FLAG_NOCOW, FS_NOCOW_FL ),
( Flags::WITH_FLAG_NODUMP, FS_NODUMP_FL ),
( Flags::WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
( Flags::WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];
// from /usr/include/linux/msdos_fs.h
const ATTR_HIDDEN: u32 = 2;
const ATTR_SYS: u32 = 4;
const ATTR_ARCH: u32 = 32;
#[rustfmt::skip]
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
];
impl Flags {
/// Get a set of feature flags from file attributes.
pub fn from_chattr(attr: c_long) -> Flags {
let mut flags = Flags::empty();
for (fe_flag, fs_flag) in &CHATTR_MAP {
if (attr & fs_flag) != 0 {
flags |= *fe_flag;
}
}
flags
}
/// Get the chattr bit representation of these feature flags.
pub fn to_chattr(self) -> c_long {
let mut flags: c_long = 0;
for (fe_flag, fs_flag) in &CHATTR_MAP {
if self.contains(*fe_flag) {
flags |= *fs_flag;
}
}
flags
}
pub fn to_initial_chattr(self) -> c_long {
self.to_chattr() & INITIAL_FS_FLAGS
}
/// Get a set of feature flags from FAT attributes.
pub fn from_fat_attr(attr: u32) -> Flags {
let mut flags = Flags::empty();
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
if (attr & fs_flag) != 0 {
flags |= *fe_flag;
}
}
flags
}
/// Get the fat attribute bit representation of these feature flags.
pub fn to_fat_attr(self) -> u32 {
let mut flags = 0u32;
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
if self.contains(*fe_flag) {
flags |= *fs_flag;
}
}
flags
}
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
pub fn from_magic(magic: i64) -> Flags {
use proxmox::sys::linux::magic::*;
match magic {
MSDOS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_FAT_ATTRS
},
EXT4_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
XFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
ZFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
BTRFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_COMPR |
Flags::WITH_FLAG_NOCOW |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_FLAG_NOCOMP |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_SUBVOLUME |
Flags::WITH_SUBVOLUME_RO |
Flags::WITH_FCAPS
},
TMPFS_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_ACL |
Flags::WITH_SELINUX
},
// FUSE mounts are special as the supported feature set
// is not clear a priori.
FUSE_SUPER_MAGIC => {
Flags::WITH_FUSE
},
_ => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_FCAPS
},
}
}
}

690
pbs-client/src/pxar/fuse.rs Normal file

@ -0,0 +1,690 @@
//! Asynchronous fuse implementation.
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ffi::{OsStr, OsString};
use std::future::Future;
use std::io;
use std::mem;
use std::ops::Range;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::channel::mpsc::UnboundedSender;
use futures::select;
use futures::sink::SinkExt;
use futures::stream::{StreamExt, TryStreamExt};
use proxmox::tools::vec;
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
use proxmox_fuse::requests::{self, FuseRequest};
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
use pbs_tools::xattr;
/// We mark inodes for regular files this way so we know how to access them.
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
#[inline]
fn is_dir_inode(inode: u64) -> bool {
0 == (inode & NON_DIRECTORY_INODE)
}
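// Directory inodes are derived from the *end* offset of their entry range, while
// non-directory inodes use the *start* offset with the marker bit set (see `to_inode`
// further down), so the two kinds are easy to tell apart.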
/// Our reader type instance used for accessors.
pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;
/// Our Accessor type instance.
pub type Accessor = accessor::aio::Accessor<Reader>;
/// Our Directory type instance.
pub type Directory = accessor::aio::Directory<Reader>;
/// Our FileEntry type instance.
pub type FileEntry = accessor::aio::FileEntry<Reader>;
/// Our FileContents type instance.
pub type FileContents = accessor::aio::FileContents<Reader>;
pub struct Session {
fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
}
impl Session {
/// Create a fuse session for an archive.
pub async fn mount_path(
archive_path: &Path,
options: &OsStr,
verbose: bool,
mountpoint: &Path,
) -> Result<Self, Error> {
// TODO: Add a buffered/caching ReadAt layer?
let file = std::fs::File::open(archive_path)?;
let file_size = file.metadata()?.len();
let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
let accessor = Accessor::new(reader, file_size).await?;
Self::mount(accessor, options, verbose, mountpoint)
}
/// Create a new fuse session for the given pxar `Accessor`.
pub fn mount(
accessor: Accessor,
options: &OsStr,
verbose: bool,
path: &Path,
) -> Result<Self, Error> {
let fuse = Fuse::builder("pxar-mount")?
.debug()
.options_os(options)?
.enable_readdirplus()
.enable_read()
.enable_readlink()
.enable_read_xattr()
.build()?
.mount(path)?;
let session = SessionImpl::new(accessor, verbose);
Ok(Self {
fut: Box::pin(session.main(fuse)),
})
}
}
impl Future for Session {
type Output = Result<(), Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
Pin::new(&mut self.fut).poll(cx)
}
}
/// We use this to return an errno value back to the kernel.
macro_rules! io_return {
($errno:expr) => {
return Err(::std::io::Error::from_raw_os_error($errno).into());
};
}
/// Format an "other" error, see `io_bail` below for details.
macro_rules! io_format_err {
($($fmt:tt)*) => {
::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
}
}
/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
/// request to be answered with a generic `EIO` error code. The error message contained in here
/// will be printed to stderr if the verbose flag is used, otherwise it is silently dropped.
macro_rules! io_bail {
($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
}
/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
/// accessed.
struct Lookup {
refs: AtomicUsize,
inode: u64,
parent: u64,
entry_range_info: EntryRangeInfo,
content_range: Option<Range<u64>>,
}
impl Lookup {
fn new(
inode: u64,
parent: u64,
entry_range_info: EntryRangeInfo,
content_range: Option<Range<u64>>,
) -> Box<Lookup> {
Box::new(Self {
refs: AtomicUsize::new(1),
inode,
parent,
entry_range_info,
content_range,
})
}
/// Decrease the reference count by `count`. Note that this must not include the reference held
/// by `self` itself, so this must not decrease the count below 2.
fn forget(&self, count: usize) -> Result<(), Error> {
loop {
let old = self.refs.load(Ordering::Acquire);
if count >= old {
io_bail!("reference count underflow");
}
let new = old - count;
match self
.refs
.compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
{
Ok(_) => break Ok(()),
Err(_) => continue,
}
}
}
fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
panic!("atomic refcount increased from 0 to 1");
}
LookupRef {
session,
lookup: self as *const Lookup,
}
}
}
struct LookupRef<'a> {
session: &'a SessionImpl,
lookup: *const Lookup,
}
unsafe impl<'a> Send for LookupRef<'a> {}
unsafe impl<'a> Sync for LookupRef<'a> {}
impl<'a> Clone for LookupRef<'a> {
fn clone(&self) -> Self {
self.get_ref(self.session)
}
}
impl<'a> std::ops::Deref for LookupRef<'a> {
type Target = Lookup;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lookup }
}
}
impl<'a> Drop for LookupRef<'a> {
fn drop(&mut self) {
if self.lookup.is_null() {
return;
}
if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
let inode = self.inode;
drop(self.session.lookups.write().unwrap().remove(&inode));
}
}
}
impl<'a> LookupRef<'a> {
fn leak(mut self) -> &'a Lookup {
unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
}
}
struct SessionImpl {
accessor: Accessor,
verbose: bool,
lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
}
impl SessionImpl {
fn new(accessor: Accessor, verbose: bool) -> Self {
let root = Lookup::new(
ROOT_ID,
ROOT_ID,
EntryRangeInfo::toplevel(0..accessor.size()),
None,
);
let mut tree = BTreeMap::new();
tree.insert(ROOT_ID, root);
Self {
accessor,
verbose,
lookups: RwLock::new(tree),
}
}
/// Here's how we deal with errors:
///
/// Any error will be printed if the verbose flag was set, otherwise the message will be
/// silently dropped.
///
/// Opaque errors will cause the fuse main loop to bail out with that error.
///
/// `io::Error`s will cause the fuse request to be responded to with the given `io::Error`. An
/// `io::ErrorKind::Other` translates to a generic `EIO`.
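///
/// For example, an `io_return!(libc::ENOENT)` from a request handler answers that
/// request with `ENOENT`, while an opaque `anyhow` error tears down the whole session.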
async fn handle_err(
&self,
request: impl FuseRequest,
err: Error,
mut sender: UnboundedSender<Error>,
) {
let final_result = match err.downcast::<io::Error>() {
Ok(err) => {
if err.kind() == io::ErrorKind::Other && self.verbose {
eprintln!("an IO error occurred: {}", err);
}
// fail the request
request.io_fail(err).map_err(Error::from)
}
Err(err) => {
// `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
if self.verbose {
eprintln!("internal error: {}, bailing out", err);
}
Err(err)
}
};
if let Err(err) = final_result {
// either we failed to send the error code to fuse, or the above was not an
// `io::Error`, so in this case notify the main loop:
sender
.send(err)
.await
.expect("failed to propagate error to main loop");
}
}
async fn main(self, fuse: Fuse) -> Result<(), Error> {
Arc::new(self).main_do(fuse).await
}
async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
loop {
select! {
request = fuse.try_next() => match request? {
Some(request) => {
tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
}
None => break,
},
err = err_recv.next() => match err {
Some(err) => if self.verbose {
eprintln!("cancelling fuse main loop due to error: {}", err);
return Err(err);
},
None => panic!("error channel was closed unexpectedly"),
},
}
}
Ok(())
}
async fn handle_request(
self: Arc<Self>,
request: Request,
mut err_sender: UnboundedSender<Error>,
) {
let result: Result<(), Error> = match request {
Request::Lookup(request) => {
match self.lookup(request.parent, &request.file_name).await {
Ok((entry, lookup)) => match request.reply(&entry) {
Ok(()) => {
lookup.leak();
Ok(())
}
Err(err) => Err(Error::from(err)),
},
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
Ok(()) => {
request.reply();
Ok(())
}
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::Getattr(request) => match self.getattr(request.inode).await {
Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
Ok(lookups) => match request.reply() {
Ok(()) => {
for i in lookups {
i.leak();
}
Ok(())
}
Err(err) => Err(Error::from(err)),
},
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::Read(request) => {
match self.read(request.inode, request.size, request.offset).await {
Ok(data) => request.reply(&data).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::Readlink(request) => match self.readlink(request.inode).await {
Ok(data) => request.reply(&data).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
Ok(data) => request
.reply(
data.into_iter()
.fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
)
.map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::GetXAttrSize(request) => {
match self.getxattr(request.inode, &request.attr_name).await {
Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::GetXAttr(request) => {
match self.getxattr(request.inode, &request.attr_name).await {
Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
other => {
if self.verbose {
eprintln!("Received unexpected fuse request");
}
other.fail(libc::ENOSYS).map_err(Error::from)
}
};
if let Err(err) = result {
err_sender
.send(err)
.await
.expect("failed to propagate error to main loop");
}
}
fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
let lookups = self.lookups.read().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
io_return!(libc::ENOENT);
}
async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
if inode == ROOT_ID {
Ok(self.accessor.open_root().await?)
} else if !is_dir_inode(inode) {
io_return!(libc::ENOTDIR);
} else {
Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
}
}
async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
unsafe {
self.accessor
.open_file_at_range(&lookup.entry_range_info)
.await
}
}
fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
if is_dir_inode(lookup.inode) {
io_return!(libc::EISDIR);
}
match lookup.content_range.clone() {
Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
None => io_return!(libc::EBADF),
}
}
fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
let lookups = self.lookups.read().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
drop(lookups);
let entry = Lookup::new(
inode,
parent,
entry.entry_range_info().clone(),
entry.content_range()?,
);
let reference = entry.get_ref(self);
entry.refs.store(1, Ordering::Release);
let mut lookups = self.lookups.write().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
lookups.insert(inode, entry);
drop(lookups);
Ok(reference)
}
fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
let node = self.get_lookup(inode)?;
node.forget(count)?;
Ok(())
}
async fn lookup(
&'_ self,
parent: u64,
file_name: &OsStr,
) -> Result<(EntryParam, LookupRef<'_>), Error> {
let dir = self.open_dir(parent).await?;
let entry = match { dir }.lookup(file_name).await? {
Some(entry) => entry,
None => io_return!(libc::ENOENT),
};
let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
// we don't know the file's end-offset, so we'll just allow the decoder to decode the
// entire rest of the archive until we figure out something better...
let entry = self.accessor.follow_hardlink(&entry).await?;
if let pxar::EntryKind::Hardlink(_) = entry.kind() {
// hardlinks must not point to other hardlinks...
io_return!(libc::ELOOP);
}
entry
} else {
entry
};
let response = to_entry(&entry)?;
let inode = response.inode;
Ok((response, self.make_lookup(parent, inode, &entry)?))
}
async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
let entry = unsafe {
self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
};
to_stat(inode, &entry)
}
async fn readdirplus(
&'_ self,
request: &mut requests::ReaddirPlus,
) -> Result<Vec<LookupRef<'_>>, Error> {
let mut lookups = Vec::new();
let offset = usize::try_from(request.offset)
.map_err(|_| io_format_err!("directory offset out of range"))?;
let dir = self.open_dir(request.inode).await?;
let dir_lookup = self.get_lookup(request.inode)?;
let entry_count = dir.read_dir().count() as isize;
let mut next = offset as isize;
let mut iter = dir.read_dir().skip(offset);
while let Some(file) = iter.next().await {
next += 1;
let file = file?.decode_entry().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = file.file_name();
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
}
if next == entry_count {
next += 1;
let file = dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new(".");
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(LookupRef::clone(&dir_lookup));
}
if next == entry_count + 1 {
next += 1;
let lookup = self.get_lookup(dir_lookup.parent)?;
let parent_dir = self.open_dir(lookup.inode).await?;
let file = parent_dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new("..");
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(lookup);
}
Ok(lookups)
}
async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
let file = self.get_lookup(inode)?;
let content = self.open_content(&file)?;
let mut buf = vec::undefined(len);
let got = content.read_at(&mut buf, offset).await?;
buf.truncate(got);
Ok(buf)
}
async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
let lookup = self.get_lookup(inode)?;
let file = self.open_entry(&lookup).await?;
match file.get_symlink() {
None => io_return!(libc::EINVAL),
Some(link) => Ok(link.to_owned()),
}
}
async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
let lookup = self.get_lookup(inode)?;
let metadata = self
.open_entry(&lookup)
.await?
.into_entry()
.into_metadata();
let mut xattrs = metadata.xattrs;
use pxar::format::XAttr;
if let Some(fcaps) = metadata.fcaps {
xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data));
}
// TODO: Special cases:
// b"system.posix_acl_access
// b"system.posix_acl_default
//
// For these we need to be able to create posix acl format entries, at that point we could
// just ditch libacl as well...
Ok(xattrs)
}
async fn listxattrs_into(
&self,
request: &mut requests::ListXAttr,
) -> Result<ReplyBufState, Error> {
let xattrs = self.listxattrs(request.inode).await?;
for entry in xattrs {
match request.add_c_string(entry.name()) {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(ReplyBufState::Full),
}
}
Ok(ReplyBufState::Ok)
}
async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
// TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
// xattr for an entry...
let xattrs = self.listxattrs(inode).await?;
for entry in xattrs {
if entry.name().to_bytes() == xattr.as_bytes() {
return Ok(entry);
}
}
io_return!(libc::ENODATA);
}
}
#[inline]
fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
to_entry_param(to_inode(&entry), &entry)
}
#[inline]
fn to_inode(entry: &FileEntry) -> u64 {
if entry.is_dir() {
entry.entry_range_info().entry_range.end
} else {
entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
}
}
fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
}
fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
let nlink = if entry.is_dir() { 2 } else { 1 };
let metadata = entry.metadata();
let mut stat: libc::stat = unsafe { mem::zeroed() };
stat.st_ino = inode;
stat.st_nlink = nlink;
stat.st_mode = u32::try_from(metadata.stat.mode)
.map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
.map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
stat.st_uid = metadata.stat.uid;
stat.st_gid = metadata.stat.gid;
stat.st_atime = metadata.stat.mtime.secs;
stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
stat.st_mtime = metadata.stat.mtime.secs;
stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
stat.st_ctime = metadata.stat.mtime.secs;
stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
Ok(stat)
}

pbs-client/src/pxar/metadata.rs Normal file

@ -0,0 +1,407 @@
use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pxar::Metadata;
use proxmox::c_result;
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;
use pbs_tools::{acl, fs, xattr};
use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
//
// utility functions
//
fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
if err.is_errno(Errno::EOPNOTSUPP) {
Ok(())
} else {
Err(err)
}
}
fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
if err.is_errno(Errno::EOPNOTSUPP) {
*not_supp = true;
Ok(())
} else {
Err(err)
}
}
fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
// restore mtime
const UTIME_OMIT: i64 = (1 << 30) - 2;
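// times[0] is atime: tv_nsec == UTIME_OMIT tells utimensat(2) to leave it unchanged;
// times[1] restores the mtime taken from the archive entry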
[
libc::timespec {
tv_sec: 0,
tv_nsec: UTIME_OMIT,
},
libc::timespec {
tv_sec: mtime.secs,
tv_nsec: mtime.nanos as _,
},
]
}
//
// metadata application:
//
pub fn apply_at(
flags: Flags,
metadata: &Metadata,
parent: RawFd,
file_name: &CStr,
path_info: &Path,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let fd = proxmox::tools::fd::Fd::openat(
&unsafe { RawFdNum::from_raw_fd(parent) },
file_name,
OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
Mode::empty(),
)?;
apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
}
pub fn apply_initial_flags(
flags: Flags,
metadata: &Metadata,
fd: RawFd,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
apply_chattr(
fd,
entry_flags.to_initial_chattr(),
flags.to_initial_chattr(),
)
.or_else(on_error)?;
Ok(())
}
pub fn apply(
flags: Flags,
metadata: &Metadata,
fd: RawFd,
path_info: &Path,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
unsafe {
// UID and GID first, as this fails if we lose access anyway.
c_result!(libc::chown(
c_proc_path.as_ptr(),
metadata.stat.uid,
metadata.stat.gid
))
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to set ownership: {}", err))
.or_else(&mut *on_error)?;
}
let mut skip_xattrs = false;
apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
.or_else(&mut *on_error)?;
add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
apply_acls(flags, &c_proc_path, metadata, path_info)
.map_err(|err| format_err!("failed to apply acls: {}", err))
.or_else(&mut *on_error)?;
apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;
// Finally mode and time. We may lose access with mode, but changing the mode also
// affects times.
if !metadata.is_symlink() {
c_result!(unsafe {
libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
})
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to change file mode: {}", err))
.or_else(&mut *on_error)?;
}
let res = c_result!(unsafe {
libc::utimensat(
libc::AT_FDCWD,
c_proc_path.as_ptr(),
timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
0,
)
});
match res {
Ok(_) => (),
Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
Err(err) => {
on_error(format_err!(
"failed to restore mtime attribute on {:?}: {}",
path_info,
err
))?;
}
}
if metadata.stat.flags != 0 {
apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
}
Ok(())
}
fn add_fcaps(
flags: Flags,
c_proc_path: *const libc::c_char,
metadata: &Metadata,
skip_xattrs: &mut bool,
) -> Result<(), Error> {
if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
return Ok(());
}
let fcaps = match metadata.fcaps.as_ref() {
Some(fcaps) => fcaps,
None => return Ok(()),
};
c_result!(unsafe {
libc::setxattr(
c_proc_path,
xattr::xattr_name_fcaps().as_ptr(),
fcaps.data.as_ptr() as *const libc::c_void,
fcaps.data.len(),
0,
)
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, skip_xattrs))
.map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;
Ok(())
}
fn apply_xattrs(
flags: Flags,
c_proc_path: *const libc::c_char,
metadata: &Metadata,
skip_xattrs: &mut bool,
) -> Result<(), Error> {
if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
return Ok(());
}
for xattr in &metadata.xattrs {
if *skip_xattrs {
return Ok(());
}
if !xattr::is_valid_xattr_name(xattr.name()) {
eprintln!("skipping invalid xattr named {:?}", xattr.name());
continue;
}
c_result!(unsafe {
libc::setxattr(
c_proc_path,
xattr.name().as_ptr() as *const libc::c_char,
xattr.value().as_ptr() as *const libc::c_void,
xattr.value().len(),
0,
)
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
.map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
}
Ok(())
}
fn apply_acls(
flags: Flags,
c_proc_path: &CStr,
metadata: &Metadata,
path_info: &Path,
) -> Result<(), Error> {
if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
return Ok(());
}
let mut acl = acl::ACL::init(5)?;
// acl type access:
acl.add_entry_full(
acl::ACL_USER_OBJ,
None,
acl::mode_user_to_acl_permissions(metadata.stat.mode),
)?;
acl.add_entry_full(
acl::ACL_OTHER,
None,
acl::mode_other_to_acl_permissions(metadata.stat.mode),
)?;
match metadata.acl.group_obj.as_ref() {
Some(group_obj) => {
acl.add_entry_full(
acl::ACL_MASK,
None,
acl::mode_group_to_acl_permissions(metadata.stat.mode),
)?;
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
}
None => {
let mode = acl::mode_group_to_acl_permissions(metadata.stat.mode);
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;
if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
eprintln!(
"Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
path_info,
);
acl.add_entry_full(acl::ACL_MASK, None, mode)?;
}
}
}
for user in &metadata.acl.users {
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
}
for group in &metadata.acl.groups {
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
}
if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
drop(acl);
// acl type default:
if let Some(default) = metadata.acl.default.as_ref() {
let mut acl = acl::ACL::init(5)?;
acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
}
for user in &metadata.acl.default_users {
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
}
for group in &metadata.acl.default_groups {
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
}
if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
}
Ok(())
}
fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
if !flags.contains(Flags::WITH_QUOTA_PROJID) {
return Ok(());
}
let projid = match metadata.quota_project_id {
Some(projid) => projid,
None => return Ok(()),
};
let mut fsxattr = fs::FSXAttr::default();
unsafe {
fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
format_err!(
"error while getting fsxattr to restore quota project id - {}",
err
)
})?;
fsxattr.fsx_projid = projid.projid as u32;
fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
format_err!(
"error while setting fsxattr to restore quota project id - {}",
err
)
})?;
}
Ok(())
}
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
}
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
if chattr == 0 {
return Ok(());
}
let mut fattr: libc::c_long = 0;
match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
Ok(_) => (),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
}
let attr = (chattr & mask) | (fattr & !mask);
if attr == fattr {
return Ok(());
}
match unsafe { fs::write_attr_fd(fd, &attr) } {
Ok(_) => Ok(()),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
Err(err) => bail!("failed to set file attributes: {}", err),
}
}
fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
let entry_flags = Flags::from_bits_truncate(entry_flags);
apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;
let fatattr = (flags & entry_flags).to_fat_attr();
if fatattr != 0 {
match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
Ok(_) => (),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
Err(err) => bail!("failed to set file FAT attributes: {}", err),
}
}
Ok(())
}

pbs-client/src/pxar/mod.rs Normal file

@ -0,0 +1,71 @@
//! *pxar* Implementation (proxmox file archive format)
//!
//! This code implements a slightly modified version of the *catar*
//! format used in the [casync](https://github.com/systemd/casync)
//! toolkit (we are not 100% binary compatible). It is a file archive
//! format defined by 'Lennart Poettering', specifically designed for
//! efficient deduplication.
//! Every archive contains items in the following order:
//! * `ENTRY` -- containing general stat() data and related bits
//! * `USER` -- user name as text, if enabled
//! * `GROUP` -- group name as text, if enabled
//! * `XATTR` -- one extended attribute
//! * ... -- more of these when there are multiple defined
//! * `ACL_USER` -- one `USER ACL` entry
//! * ... -- more of these when there are multiple defined
//! * `ACL_GROUP` -- one `GROUP ACL` entry
//! * ... -- more of these when there are multiple defined
//! * `ACL_GROUP_OBJ` -- The `ACL_GROUP_OBJ`
//! * `ACL_DEFAULT` -- the various default ACL fields, if one is defined
//! * `ACL_DEFAULT_USER` -- one USER ACL entry
//! * ... -- more of these when multiple are defined
//! * `ACL_DEFAULT_GROUP` -- one GROUP ACL entry
//! * ... -- more of these when multiple are defined
//! * `FCAPS` -- file capability in Linux disk format
//! * `QUOTA_PROJECT_ID` -- the ext4/xfs quota project ID
//! * `PAYLOAD` -- file contents, if it is one
//! * `SYMLINK` -- symlink target, if it is one
//! * `DEVICE` -- device major/minor, if it is a block/char device
//!
//! If we are serializing a directory, then this is followed by:
//!
//! * `FILENAME` -- name of the first directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the first directory entry's metadata and contents,
//! following the exact same archive format
//! * `FILENAME` -- name of the second directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the second directory entry
//! * ...
//! * `GOODBYE` -- lookup table at the end of a list of directory entries
//!
//! The original format has no way to deal with hardlinks, so we
//! extended the format by a special `HARDLINK` tag, which can replace
//! an `ENTRY` tag. The `HARDLINK` tag contains a 64-bit offset which
//! points to the linked `ENTRY` inside the archive, followed by the
//! full path name of that `ENTRY`. `HARDLINK`s may not have further data
//! (user, group, acl, ...) because this is already defined by the
//! linked `ENTRY`.
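//!
//! A minimal sketch of walking an archive in the order described above, using the
//! async decoder as the extraction code in this crate does (error handling is
//! omitted, and the `from_tokio` constructor taking any tokio `AsyncRead` source is
//! an assumption about the `pxar` crate's `tokio-io` feature):
//!
//! ```ignore
//! let mut decoder = pxar::decoder::aio::Decoder::from_tokio(reader).await?;
//! decoder.enable_goodbye_entries(true);
//! while let Some(entry) = decoder.next().await {
//!     let entry = entry?;
//!     match entry.kind() {
//!         pxar::EntryKind::Directory => { /* descend into directory */ }
//!         pxar::EntryKind::GoodbyeTable => { /* directory ends */ }
//!         _ => { /* file, symlink, device node, ... */ }
//!     }
//! }
//! ```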
pub(crate) mod create;
pub(crate) mod dir_stack;
pub(crate) mod extract;
pub mod fuse;
pub(crate) mod metadata;
pub(crate) mod tools;
mod flags;
pub use flags::Flags;
pub use create::{create_archive, PxarCreateOptions};
pub use extract::{
create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
PxarExtractOptions,
};
/// The format requires building sorted directory lookup tables in
/// memory, so we restrict the number of allowed entries to limit
/// maximum memory usage.
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
pub use tools::{format_multi_line_entry, format_single_line_entry};

pbs-client/src/pxar/tools.rs Normal file

@ -0,0 +1,202 @@
//! Some common methods used within the pxar code.
use std::convert::TryFrom;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;
use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
let mode = meta.stat.get_permission_bits();
u32::try_from(mode)
.map_err(drop)
.and_then(|mode| Mode::from_bits(mode).ok_or(()))
.map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
}
/// Make sure path is relative and not '.' or '..'.
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
assert_relative_path_do(Path::new(path))
}
/// Make sure path is a single component and not '.' or '..'.
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
assert_single_path_component_do(Path::new(path))
}
fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
if !path.is_relative() {
bail!("bad absolute file name in archive: {:?}", path);
}
Ok(())
}
fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
assert_relative_path_do(path)?;
let mut components = path.components();
match components.next() {
Some(std::path::Component::Normal(_)) => (),
_ => bail!("invalid path component in archive: {:?}", path),
}
if components.next().is_some() {
bail!(
"invalid path with multiple components in archive: {:?}",
path
);
}
Ok(())
}
#[rustfmt::skip]
fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
[
if 0 != c & 4 { b'r' } else { b'-' },
if 0 != c & 2 { b'w' } else { b'-' },
match (c & 1, special) {
(0, false) => b'-',
(0, true) => special_no_x,
(_, false) => b'x',
(_, true) => special_x,
}
]
}
fn mode_string(entry: &Entry) -> String {
// https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
// additionally we use:
// a capital 'L' as the file type for hard links
// a second '+' after the mode to show non-acl xattr presence
//
// Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
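// e.g. a regular file might render in the single-line format as (illustrative):
// -rw-r--r--   1000/1000     2021-07-19 10:50:18     4096 "/etc/hostname"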
let meta = entry.metadata();
let mode = meta.stat.mode;
let type_char = if entry.is_hardlink() {
'L'
} else {
match mode & mode::IFMT {
mode::IFREG => '-',
mode::IFBLK => 'b',
mode::IFCHR => 'c',
mode::IFDIR => 'd',
mode::IFLNK => 'l',
mode::IFIFO => 'p',
mode::IFSOCK => 's',
_ => '?',
}
};
let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
let fmt_o = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISVTX, b't', b'T');
let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
format!(
"{}{}{}{}{}{}",
type_char,
unsafe { std::str::from_utf8_unchecked(&fmt_u) },
unsafe { std::str::from_utf8_unchecked(&fmt_g) },
unsafe { std::str::from_utf8_unchecked(&fmt_o) },
has_acls,
has_xattrs,
)
}
fn format_mtime(mtime: &StatxTimestamp) -> String {
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
return s;
}
format!("{}.{}", mtime.secs, mtime.nanos)
}
pub fn format_single_line_entry(entry: &Entry) -> String {
let mode_string = mode_string(entry);
let meta = entry.metadata();
let (size, link) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
_ => ("0".to_string(), String::new()),
};
format!(
"{} {:<13} {} {:>8} {:?}{}",
mode_string,
format!("{}/{}", meta.stat.uid, meta.stat.gid),
format_mtime(&meta.stat.mtime),
size,
entry.path(),
link,
)
}
pub fn format_multi_line_entry(entry: &Entry) -> String {
let mode_string = mode_string(entry);
let meta = entry.metadata();
let (size, link, type_name) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
EntryKind::Symlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
),
EntryKind::Hardlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
),
EntryKind::Device(dev) => (
format!("{},{}", dev.major, dev.minor),
String::new(),
if meta.stat.is_chardev() {
"characters pecial file"
} else if meta.stat.is_blockdev() {
"block special file"
} else {
"device"
},
),
EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
};
let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
Ok(name) => std::borrow::Cow::Borrowed(name),
Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
};
format!(
" File: {}{}\n \
Size: {:<13} Type: {}\n\
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
Modify: {}\n",
file_name,
link,
size,
type_name,
meta.file_mode(),
mode_string,
meta.stat.uid,
meta.stat.gid,
format_mtime(&meta.stat.mtime),
)
}

pbs-client/src/pxar_backup_stream.rs Normal file

@ -0,0 +1,125 @@
use std::io::Write;
//use std::os::unix::io::FromRawFd;
use std::path::Path;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::stream::Stream;
use futures::future::{Abortable, AbortHandle};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pbs_datastore::catalog::CatalogWriter;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
/// Stream implementation to encode and upload .pxar archives.
///
/// The hyper client needs an async Stream for file upload, so we
/// spawn an extra thread to encode the .pxar data and pipe it to the
/// consumer.
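///
/// A minimal usage sketch (the catalog writer and create options are assumed to be
/// set up by the caller):
///
/// ```ignore
/// let stream = PxarBackupStream::open(Path::new("/path/to/backup"), catalog, options)?;
/// // `stream` implements `futures::Stream<Item = Result<Vec<u8>, Error>>` and can be
/// // handed to the hyper-based uploader as a request body
/// ```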
pub struct PxarBackupStream {
rx: Option<std::sync::mpsc::Receiver<Result<Vec<u8>, Error>>>,
handle: Option<AbortHandle>,
error: Arc<Mutex<Option<String>>>,
}
impl Drop for PxarBackupStream {
fn drop(&mut self) {
self.rx = None;
self.handle.take().unwrap().abort();
}
}
impl PxarBackupStream {
pub fn new<W: Write + Send + 'static>(
dir: Dir,
catalog: Arc<Mutex<CatalogWriter<W>>>,
options: crate::pxar::PxarCreateOptions,
) -> Result<Self, Error> {
let (tx, rx) = std::sync::mpsc::sync_channel(10);
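// bounded channel: once 10 encoded buffers are queued the encoder side blocks
// until the stream consumer catches up, limiting memory usage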
let buffer_size = 256 * 1024;
let error = Arc::new(Mutex::new(None));
let error2 = Arc::clone(&error);
let handler = async move {
let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
buffer_size,
StdChannelWriter::new(tx),
));
let verbose = options.verbose;
let writer = pxar::encoder::sync::StandardWriter::new(writer);
if let Err(err) = crate::pxar::create_archive(
dir,
writer,
crate::pxar::Flags::DEFAULT,
move |path| {
if verbose {
println!("{:?}", path);
}
Ok(())
},
Some(catalog),
options,
).await {
let mut error = error2.lock().unwrap();
*error = Some(err.to_string());
}
};
let (handle, registration) = AbortHandle::new_pair();
let future = Abortable::new(handler, registration);
tokio::spawn(future);
Ok(Self {
rx: Some(rx),
handle: Some(handle),
error,
})
}
pub fn open<W: Write + Send + 'static>(
dirname: &Path,
catalog: Arc<Mutex<CatalogWriter<W>>>,
options: crate::pxar::PxarCreateOptions,
) -> Result<Self, Error> {
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
Self::new(
dir,
catalog,
options,
)
}
}
impl Stream for PxarBackupStream {
type Item = Result<Vec<u8>, Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
{
// limit lock scope
let error = self.error.lock().unwrap();
if let Some(ref msg) = *error {
return Poll::Ready(Some(Err(format_err!("{}", msg))));
}
}
match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
Ok(data) => Poll::Ready(Some(data)),
Err(_) => {
let error = self.error.lock().unwrap();
if let Some(ref msg) = *error {
return Poll::Ready(Some(Err(format_err!("{}", msg))));
}
Poll::Ready(None) // channel closed, no error
}
}
}
}

pbs-client/src/remote_chunk_reader.rs Normal file

@ -0,0 +1,125 @@
use std::future::Future;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
use pbs_datastore::{CryptConfig, CryptMode};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::read_chunk::ReadChunk;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_runtime::block_on;
use super::BackupReader;
/// Read chunks from remote host using ``BackupReader``
#[derive(Clone)]
pub struct RemoteChunkReader {
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
cache_hint: Arc<HashMap<[u8; 32], usize>>,
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
}
impl RemoteChunkReader {
/// Create a new instance.
///
/// Chunks listed in ``cache_hint`` are cached and kept in RAM.
pub fn new(
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
cache_hint: HashMap<[u8; 32], usize>,
) -> Self {
Self {
client,
crypt_config,
crypt_mode,
cache_hint: Arc::new(cache_hint),
cache: Arc::new(Mutex::new(HashMap::new())),
}
}
/// Download a raw chunk. This only verifies the (untrusted) CRC32; use
/// `DataBlob::verify_unencrypted` or `DataBlob::decode` before storing/processing it further.
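///
/// A sketch of the intended call pattern (mirroring `read_chunk` below):
///
/// ```ignore
/// let chunk = reader.read_raw_chunk(&digest).await?;
/// // decode verifies/decrypts depending on the blob's crypt mode
/// let raw_data = chunk.decode(crypt_config.as_ref().map(Arc::as_ref), Some(&digest))?;
/// ```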
pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
self.client
.download_chunk(&digest, &mut chunk_data)
.await?;
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
match self.crypt_mode {
CryptMode::Encrypt => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => Ok(chunk),
CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
}
},
CryptMode::SignOnly | CryptMode::None => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
CryptMode::SignOnly | CryptMode::None => Ok(chunk),
}
},
}
}
}
impl ReadChunk for RemoteChunkReader {
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
block_on(Self::read_raw_chunk(self, digest))
}
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
return Ok(raw_data.to_vec());
}
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
let use_cache = self.cache_hint.contains_key(digest);
if use_cache {
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
}
Ok(raw_data)
}
}
impl AsyncReadChunk for RemoteChunkReader {
fn read_raw_chunk<'a>(
&'a self,
digest: &'a [u8; 32],
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
Box::pin(Self::read_raw_chunk(self, digest))
}
fn read_chunk<'a>(
&'a self,
digest: &'a [u8; 32],
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
Box::pin(async move {
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
return Ok(raw_data.to_vec());
}
let chunk = Self::read_raw_chunk(self, digest).await?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
let use_cache = self.cache_hint.contains_key(digest);
if use_cache {
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
}
Ok(raw_data)
})
}
}

117
pbs-client/src/task_log.rs Normal file

@ -0,0 +1,117 @@
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
use anyhow::{bail, Error};
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use futures::*;
use proxmox::api::cli::format_and_print_result;
use pbs_tools::percent_encoding::percent_encode_component;
use super::HttpClient;
/// Display task log on console
///
/// This polls the task API and prints the log to the console. It also
/// catches interrupt signals, and sends an abort request to the task if
/// the user presses CTRL-C. Two interrupts cause an immediate end of
/// the loop. The task may still run in that case.
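///
/// A usage sketch (assumes an authenticated `client` and a UPID string
/// returned by a task-spawning API call):
///
/// ```ignore
/// display_task_log(&mut client, &upid_str, true).await?;
/// ```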
pub async fn display_task_log(
client: &mut HttpClient,
upid_str: &str,
strip_date: bool,
) -> Result<(), Error> {
let mut signal_stream = signal(SignalKind::interrupt())?;
let abort_count = Arc::new(AtomicUsize::new(0));
let abort_count2 = Arc::clone(&abort_count);
let abort_future = async move {
while signal_stream.recv().await.is_some() {
println!("got shutdown request (SIGINT)");
let prev_count = abort_count2.fetch_add(1, Ordering::SeqCst);
if prev_count >= 1 {
println!("forced exit (task still running)");
break;
}
}
Ok::<_, Error>(())
};
let request_future = async move {
let mut start = 1;
let limit = 500;
loop {
let abort = abort_count.load(Ordering::Relaxed);
if abort > 0 {
let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
let _ = client.delete(&path, None).await?;
}
let param = json!({ "start": start, "limit": limit, "test-status": true });
let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
let result = client.get(&path, Some(param)).await?;
let active = result["active"].as_bool().unwrap();
let total = result["total"].as_u64().unwrap();
let data = result["data"].as_array().unwrap();
let lines = data.len();
for item in data {
let n = item["n"].as_u64().unwrap();
let t = item["t"].as_str().unwrap();
if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
if strip_date && t.len() > 27 && &t[25..27] == ": " {
let line = &t[27..];
println!("{}", line);
} else {
println!("{}", t);
}
start += 1;
}
if start > total {
if active {
tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
} else {
break;
}
} else if lines != limit {
bail!("got wrong number of lines from server ({} != {})", lines, limit);
}
}
Ok(())
};
futures::select!{
request = request_future.fuse() => request?,
abort = abort_future.fuse() => abort?,
};
Ok(())
}
/// Display the task result (UPID) or view the task log, depending on the output format
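///
/// Sketch (hypothetical datastore name; assumes the POST returns a UPID in `data`):
///
/// ```ignore
/// let result = client.post("api2/json/admin/datastore/store1/gc", None).await?;
/// view_task_result(&mut client, result, "text").await?;
/// ```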
pub async fn view_task_result(
client: &mut HttpClient,
result: Value,
output_format: &str,
) -> Result<(), Error> {
let data = &result["data"];
if output_format == "text" {
if let Some(upid) = data.as_str() {
display_task_log(client, upid, true).await?;
}
} else {
format_and_print_result(&data, &output_format);
}
Ok(())
}

585
pbs-client/src/tools/key_source.rs Normal file
View File

@ -0,0 +1,585 @@
use std::convert::TryFrom;
use std::path::PathBuf;
use std::os::unix::io::{FromRawFd, RawFd};
use std::io::Read;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::api::schema::*;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::file_get_contents;
use pbs_api_types::CryptMode;
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
pub const KEYFILE_SCHEMA: Schema =
StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
.schema();
pub const KEYFD_SCHEMA: Schema =
IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
.minimum(0)
.schema();
pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
"Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
.schema();
pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
.minimum(0)
.schema();
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum KeySource {
DefaultKey,
Fd,
Path(String),
}
pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
match source {
KeySource::DefaultKey => format!("Using default {} key..", key_type),
KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeyWithSource {
pub source: KeySource,
pub key: Vec<u8>,
}
impl KeyWithSource {
pub fn from_fd(key: Vec<u8>) -> Self {
Self {
source: KeySource::Fd,
key,
}
}
pub fn from_default(key: Vec<u8>) -> Self {
Self {
source: KeySource::DefaultKey,
key,
}
}
pub fn from_path(path: String, key: Vec<u8>) -> Self {
Self {
source: KeySource::Path(path),
key,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct CryptoParams {
pub mode: CryptMode,
pub enc_key: Option<KeyWithSource>,
// FIXME switch to openssl::rsa::Rsa<openssl::pkey::Public> once that is Eq?
pub master_pubkey: Option<KeyWithSource>,
}
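/// Parse the crypto-related CLI parameters (`keyfile`, `keyfd`, `master-pubkey-file`,
/// `master-pubkey-fd`, `crypt-mode`) into a [`CryptoParams`].
///
/// A minimal sketch (hypothetical key path):
///
/// ```ignore
/// let param = json!({ "keyfile": "/path/to/encryption-key.json", "crypt-mode": "encrypt" });
/// let crypto = crypto_parameters(&param)?;
/// assert_eq!(crypto.mode, CryptMode::Encrypt);
/// ```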
pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, false)
}
pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, true)
}
fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
let keyfile = match param.get("keyfile") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --keyfile parameter type"),
None => None,
};
let key_fd = match param.get("keyfd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --keyfd parameter type"),
None => None,
};
let master_pubkey_file = match param.get("master-pubkey-file") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --master-pubkey-file parameter type"),
None => None,
};
let master_pubkey_fd = match param.get("master-pubkey-fd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
None => None,
};
let mode: Option<CryptMode> = match param.get("crypt-mode") {
Some(mode) => Some(serde_json::from_value(mode.clone())?),
None => None,
};
let key = match (keyfile, key_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = input.read_to_end(&mut data).map_err(|err| {
format_err!("error reading encryption key from fd {}: {}", fd, err)
})?;
if keep_keyfd_open {
// don't close fd if requested, and try to reset seek position
std::mem::forget(input);
unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
}
Some(KeyWithSource::from_fd(data))
}
};
let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--master-pubkey-file and --master-pubkey-fd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = { input }
.read_to_end(&mut data)
.map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
Some(KeyWithSource::from_fd(data))
}
};
let res = match mode {
// no crypt mode, enable encryption if keys are available
None => match (key, master_pubkey) {
// only default keys if available
(None, None) => match read_optional_default_encryption_key()? {
None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
enc_key => {
let master_pubkey = read_optional_default_master_pubkey()?;
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit master key, default enc key needed
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
enc_key => {
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit keyfile, maybe default master key
(enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },
// explicit keyfile and master key
(enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
},
// explicitly disabled encryption
Some(CryptMode::None) => match (key, master_pubkey) {
// no keys => OK, no encryption
(None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
// --keyfile and --crypt-mode=none
(Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
// --master-pubkey-file and --crypt-mode=none
(_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
},
// explicitly enabled encryption
Some(mode) => match (key, master_pubkey) {
// no key, maybe master key
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
enc_key => {
eprintln!("Encrypting with default encryption key!");
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams {
mode,
enc_key,
master_pubkey,
}
},
},
// --keyfile and --crypt-mode other than none
(enc_key, master_pubkey) => {
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams { mode, enc_key, master_pubkey }
},
},
};
Ok(res)
}
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
find_default_encryption_key()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
find_default_master_pubkey()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(test)]
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_ENCRYPTION_KEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_ENCRYPTION_KEY = value;
}
#[cfg(test)]
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_MASTER_PUBKEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_MASTER_PUBKEY = value;
}
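/// Query the password for the encryption key: first from the
/// `PBS_ENCRYPTION_PASSWORD` environment variable, then interactively from the
/// TTY if stdin is a terminal.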
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods
use std::env::VarError::*;
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
Ok(p) => return Ok(p.as_bytes().to_vec()),
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
Err(NotPresent) => {
// Try another method
}
}
// If we're on a TTY, query the user for a password
if tty::stdin_isatty() {
return Ok(tty::read_password("Encryption Key Password: ")?);
}
bail!("no password input mechanism available");
}
#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
use serde_json::json;
use proxmox::tools::fs::{replace_file, CreateOptions};
let some_key = vec![1;1];
let default_key = vec![2;1];
let some_master_key = vec![3;1];
let default_master_key = vec![4;1];
let keypath = "./target/testout/keyfile.test";
let master_keypath = "./target/testout/masterkeyfile.test";
let invalid_keypath = "./target/testout/invalid_keyfile.test";
let no_key_res = CryptoParams {
enc_key: None,
master_pubkey: None,
mode: CryptMode::None,
};
let some_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let some_key_some_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_path(
master_keypath.to_string(),
some_master_key.clone(),
)),
mode: CryptMode::Encrypt,
};
let some_key_default_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
mode: CryptMode::Encrypt,
};
let some_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
let default_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let default_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
replace_file(&keypath, &some_key, CreateOptions::default())?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, no default key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now set a default key
unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }
// and repeat
// no params but default key == default key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), default_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
assert_eq!(res.unwrap(), default_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
assert_eq!(res.unwrap(), default_key_res);
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now make default key retrieval error
unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
// and repeat
// no params, default key retrieval errors == Error
assert!(crypto_parameters(&json!({})).is_err());
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key error == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now remove default key again
unsafe { set_test_encryption_key(Ok(None)); }
// set a default master key
unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
// an explicit master key without an encryption key == Error
assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
// just a default master key (no encryption key) == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
// same with fallback to default master key
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// crypt mode none with explicit master key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
// with just default master key == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt without enc key == error
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// invalid master keyfile parameter always errors when a key is passed, even with a valid
// default master key
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
Ok(())
}

374
pbs-client/src/tools/mod.rs Normal file
View File

@ -0,0 +1,374 @@
//! Shared tools useful for common CLI clients.
use std::collections::HashMap;
use anyhow::{bail, format_err, Context, Error};
use serde_json::{json, Value};
use xdg::BaseDirectories;
use proxmox::{
api::schema::*,
tools::fs::file_get_json,
};
use pbs_api_types::{BACKUP_REPO_URL, Authid, UserWithTokens};
use pbs_datastore::BackupDir;
use pbs_tools::json::json_object_to_query;
use crate::{BackupRepository, HttpClient, HttpClientOptions};
pub mod key_source;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL)
.max_length(256)
.schema();
pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
.minimum(64)
.maximum(4096)
.default(4096)
.schema();
pub fn get_default_repository() -> Option<String> {
std::env::var("PBS_REPOSITORY").ok()
}
pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
let repo_url = param["repository"]
.as_str()
.map(String::from)
.or_else(get_default_repository)
.ok_or_else(|| format_err!("unable to get (default) repository"))?;
let repo: BackupRepository = repo_url.parse()?;
Ok(repo)
}
pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> {
param
.get("repository")
.map(String::from)
.or_else(get_default_repository)
.and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
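/// Build an authenticated `HttpClient` for the given repository, reading the
/// password and fingerprint from `PBS_PASSWORD`/`PBS_FINGERPRINT` if set.
///
/// Sketch (hypothetical repository spec):
///
/// ```ignore
/// let repo: BackupRepository = "user@pbs@server:store1".parse()?;
/// let client = connect(&repo)?;
/// ```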
pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
connect_do(repo.host(), repo.port(), repo.auth_id())
.map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
use std::env::VarError::*;
let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
Ok(p) => Some(p),
Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
Err(NotPresent) => None,
};
let options = HttpClientOptions::new_interactive(password, fingerprint);
HttpClient::new(server, port, auth_id, options)
}
/// Like `get`, but simply ignores errors and returns `Value::Null` instead.
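///
/// Sketch (endpoint as used by the completion helpers below):
///
/// ```ignore
/// let groups = try_get(&repo, &format!("api2/json/admin/datastore/{}/groups", repo.store())).await;
/// ```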
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
// ticket cache, but no questions asked
let options = HttpClientOptions::new_interactive(password, fingerprint)
.interactive(false);
let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
Ok(v) => v,
_ => return Value::Null,
};
let mut resp = match client.get(url, None).await {
Ok(v) => v,
_ => return Value::Null,
};
if let Some(map) = resp.as_object_mut() {
if let Some(data) = map.remove("data") {
return data;
}
}
Value::Null
}
pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_backup_group_do(param).await })
}
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let (Some(backup_id), Some(backup_type)) =
(item["backup-id"].as_str(), item["backup-type"].as_str())
{
result.push(format!("{}/{}", backup_type, backup_id));
}
}
}
result
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
if arg.matches('/').count() < 2 {
let groups = complete_backup_group_do(param).await;
let mut result = vec![];
for group in groups {
result.push(group.to_string());
result.push(format!("{}/", group));
}
return result;
}
complete_backup_snapshot_do(param).await
}
pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
}
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
}
}
}
result
}
pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_server_file_name_do(param).await })
}
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let snapshot: BackupDir = match param.get("snapshot") {
Some(path) => {
match path.parse() {
Ok(v) => v,
_ => return result,
}
}
_ => return result,
};
let query = json_object_to_query(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
})).unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let Some(filename) = item["filename"].as_str() {
result.push(filename.to_owned());
}
}
}
result
}
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.map(|v| pbs_tools::format::strip_server_file_extension(&v))
.collect()
}
pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.filter_map(|name| {
if name.ends_with(".pxar.didx") {
Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
})
.collect()
}
pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.filter_map(|name| {
if name.ends_with(".img.fidx") {
Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
})
.collect()
}
pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let mut size = 64;
loop {
result.push(size.to_string());
size *= 2;
if size > 4096 { break; }
}
result
}
pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_auth_id_do(param).await })
}
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
for user in parsed {
result.push(user.userid.to_string());
for token in user.tokens {
result.push(token.tokenid.to_string());
}
}
};
result
}
pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let base = match BaseDirectories::with_prefix("proxmox-backup") {
Ok(v) => v,
_ => return result,
};
// usually $HOME/.cache/proxmox-backup/repo-list
let path = match base.place_cache_file("repo-list") {
Ok(v) => v,
_ => return result,
};
let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
if let Some(map) = data.as_object() {
for (repo, _count) in map {
result.push(repo.to_owned());
}
}
result
}
pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let data: Vec<&str> = arg.splitn(2, ':').collect();
if data.len() != 2 {
result.push(String::from("root.pxar:/"));
result.push(String::from("etc.pxar:/etc"));
return result;
}
let files = pbs_tools::fs::complete_file_name(data[1], param);
for file in files {
result.push(format!("{}:{}", data[0], file));
}
result
}
pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}
/// Convenience helper for better error messages.
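///
/// Sketch (file name and description as used for the default encryption key):
///
/// ```ignore
/// if let Some(path) = find_xdg_file("encryption-key.json", "default encryption key file")? {
///     println!("using {:?}", path);
/// }
/// ```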
pub fn find_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
let file_name = file_name.as_ref();
base_directories()
.map(|base| base.find_config_file(file_name))
.with_context(|| format!("error searching for {}", description))
}
pub fn place_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<std::path::PathBuf, Error> {
let file_name = file_name.as_ref();
base_directories()
.and_then(|base| base.place_config_file(file_name).map_err(Error::from))
.with_context(|| format!("failed to place {} in xdg home", description))
}

256
pbs-client/src/vsock_client.rs Normal file
View File

@ -0,0 +1,256 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{bail, format_err, Error};
use futures::*;
use http::Uri;
use http::{Request, Response};
use hyper::client::connect::{Connected, Connection};
use hyper::client::Client;
use hyper::Body;
use pin_project::pin_project;
use serde_json::Value;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio::net::UnixStream;
use proxmox::api::error::HttpError;
pub const DEFAULT_VSOCK_PORT: u16 = 807;
#[derive(Clone)]
struct VsockConnector;
#[pin_project]
/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
struct UnixConnection {
#[pin]
stream: UnixStream,
}
impl tower_service::Service<Uri> for VsockConnector {
type Response = UnixConnection;
type Error = Error;
type Future = Pin<Box<dyn Future<Output = Result<UnixConnection, Error>> + Send>>;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, dst: Uri) -> Self::Future {
use nix::sys::socket::*;
use std::os::unix::io::FromRawFd;
// connect can block, so run in blocking task (though in reality it seems to immediately
// return with either ENODEV or ETIMEDOUT in case of error)
tokio::task::spawn_blocking(move || {
if dst.scheme_str().unwrap_or_default() != "vsock" {
bail!("invalid URI (scheme) for vsock connector: {}", dst);
}
let cid = match dst.host() {
Some(host) => host.parse().map_err(|err| {
format_err!(
"invalid URI (host not a number) for vsock connector: {} ({})",
dst,
err
)
})?,
None => bail!("invalid URI (no host) for vsock connector: {}", dst),
};
let port = match dst.port_u16() {
Some(port) => port,
None => bail!("invalid URI (bad port) for vsock connector: {}", dst),
};
let sock_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::empty(),
None,
)?;
let sock_addr = VsockAddr::new(cid, port as u32);
connect(sock_fd, &SockAddr::Vsock(sock_addr))?;
// connect sync, but set nonblock after (tokio requires it)
let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };
std_stream.set_nonblocking(true)?;
let stream = tokio::net::UnixStream::from_std(std_stream)?;
let connection = UnixConnection { stream };
Ok(connection)
})
// unravel the thread JoinHandle to a usable future
.map(|res| match res {
Ok(res) => res,
Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
})
.boxed()
}
}
impl Connection for UnixConnection {
fn connected(&self) -> Connected {
Connected::new()
}
}
impl AsyncRead for UnixConnection {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf,
) -> Poll<Result<(), std::io::Error>> {
let this = self.project();
this.stream.poll_read(cx, buf)
}
}
impl AsyncWrite for UnixConnection {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<tokio::io::Result<usize>> {
let this = self.project();
this.stream.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
let this = self.project();
this.stream.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
let this = self.project();
this.stream.poll_shutdown(cx)
}
}
/// Slimmed-down version of `HttpClient` for virtio-vsock connections (file restore daemon)
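///
/// Usage sketch (hypothetical guest CID and API path; assumes the daemon
/// listens on the default vsock port):
///
/// ```ignore
/// let client = VsockClient::new(42, DEFAULT_VSOCK_PORT, None);
/// let status = client.get("api2/json/status", None).await?;
/// ```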
pub struct VsockClient {
client: Client<VsockConnector>,
cid: i32,
port: u16,
auth: Option<String>,
}
impl VsockClient {
pub fn new(cid: i32, port: u16, auth: Option<String>) -> Self {
let conn = VsockConnector {};
let client = Client::builder().build::<_, Body>(conn);
Self {
client,
cid,
port,
auth,
}
}
pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = self.request_builder("GET", path, data)?;
self.api_request(req).await
}
pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = self.request_builder("POST", path, data)?;
self.api_request(req).await
}
pub async fn download(
&self,
path: &str,
data: Option<Value>,
output: &mut (dyn AsyncWrite + Send + Unpin),
) -> Result<(), Error> {
let req = self.request_builder("GET", path, data)?;
let client = self.client.clone();
let resp = client
.request(req)
.await
.map_err(|_| format_err!("vsock download request timed out"))?;
let status = resp.status();
if !status.is_success() {
Self::api_response(resp).await.map(|_| ())?
} else {
resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk).await?;
Ok::<_, Error>(acc)
})
.await?;
}
Ok(())
}
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status();
let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() {
if text.is_empty() {
Ok(Value::Null)
} else {
let value: Value = serde_json::from_str(&text)?;
Ok(value)
}
} else {
Err(Error::from(HttpError::new(status, text)))
}
}
async fn api_request(&self, req: Request<Body>) -> Result<Value, Error> {
self.client
.request(req)
.map_err(Error::from)
.and_then(Self::api_response)
.await
}
fn request_builder(
&self,
method: &str,
path: &str,
data: Option<Value>,
) -> Result<Request<Body>, Error> {
let path = path.trim_matches('/');
let url: Uri = format!("vsock://{}:{}/{}", self.cid, self.port, path).parse()?;
let make_builder = |content_type: &str, url: &Uri| {
let mut builder = Request::builder()
.method(method)
.uri(url)
.header(hyper::header::CONTENT_TYPE, content_type);
if let Some(auth) = &self.auth {
builder = builder.header(hyper::header::AUTHORIZATION, auth);
}
builder
};
if let Some(data) = data {
if method == "POST" {
let builder = make_builder("application/json", &url);
let request = builder.body(Body::from(data.to_string()))?;
return Ok(request);
} else {
let query = pbs_tools::json::json_object_to_query(data)?;
let url: Uri =
format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
let builder = make_builder("application/x-www-form-urlencoded", &url);
let request = builder.body(Body::empty())?;
return Ok(request);
}
}
let builder = make_builder("application/x-www-form-urlencoded", &url);
let request = builder.body(Body::empty())?;
Ok(request)
}
}