move remaining client tools to pbs-tools/datastore
pbs-datastore now ended up depending on tokio after all, but that's fine for now.

For the fuse code I added pbs-fuse-loop (it contains the old fuse_loop and its 'loopdev' module); ultimately only binaries should depend on this, to avoid the library link.

The only things remaining to move out of the client binary are the api method return types; those will need to be moved to pbs-api-types...

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
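The resulting import layout, as exercised by the hunks below (paths taken verbatim from this commit; importing the pbs_systemd functions directly is an assumption, the hunks call them fully qualified):

    use pbs_api_types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA, UPID};
    use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
    use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
    use pbs_fuse_loop::FuseLoopSession;
    use pbs_systemd::{escape_unit, unescape_unit};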
@@ -933,44 +933,6 @@ pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
     .schema();
 
-pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .min_length(1)
-    .max_length(64)
-    .schema();
-
-#[api]
-#[derive(Deserialize, Serialize)]
-/// RSA public key information
-pub struct RsaPubKeyInfo {
-    /// Path to key (if stored in a file)
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub path: Option<String>,
-    /// RSA exponent
-    pub exponent: String,
-    /// Hex-encoded RSA modulus
-    pub modulus: String,
-    /// Key (modulus) length in bits
-    pub length: usize,
-}
-
-impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
-    type Error = anyhow::Error;
-
-    fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
-        let modulus = value.n().to_hex_str()?.to_string();
-        let exponent = value.e().to_dec_str()?.to_string();
-        let length = value.size() as usize * 8;
-
-        Ok(Self {
-            path: None,
-            exponent,
-            modulus,
-            length,
-        })
-    }
-}
-
 #[api(
     properties: {
         "next-run": {
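A minimal usage sketch for the moved type (the PEM parsing call is from the openssl crate; the file path is hypothetical):

    use std::convert::TryFrom;

    fn describe_public_key(pem: &[u8]) -> Result<RsaPubKeyInfo, anyhow::Error> {
        // Parse a PEM-encoded RSA public key and derive its printable info
        // via the TryFrom impl above.
        let rsa = openssl::rsa::Rsa::public_key_from_pem(pem)?;
        let mut info = RsaPubKeyInfo::try_from(rsa)?;
        // `path` stays None unless the key actually came from a file.
        info.path = Some("/path/to/master-public.pem".to_string());
        Ok(info)
    }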
@@ -1,230 +0,0 @@
-use std::io::{self, Seek, SeekFrom};
-use std::ops::Range;
-use std::sync::{Arc, Mutex};
-use std::task::Context;
-use std::pin::Pin;
-
-use anyhow::{bail, format_err, Error};
-
-use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
-
-use pbs_datastore::dynamic_index::DynamicIndexReader;
-use pbs_datastore::read_chunk::ReadChunk;
-use pbs_datastore::index::IndexFile;
-use pbs_tools::lru_cache::LruCache;
-
-struct CachedChunk {
-    range: Range<u64>,
-    data: Vec<u8>,
-}
-
-impl CachedChunk {
-    /// Perform sanity checks on the range and data size:
-    pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
-        if data.len() as u64 != range.end - range.start {
-            bail!(
-                "read chunk with wrong size ({} != {})",
-                data.len(),
-                range.end - range.start,
-            );
-        }
-        Ok(Self { range, data })
-    }
-}
-
-pub struct BufferedDynamicReader<S> {
-    store: S,
-    index: DynamicIndexReader,
-    archive_size: u64,
-    read_buffer: Vec<u8>,
-    buffered_chunk_idx: usize,
-    buffered_chunk_start: u64,
-    read_offset: u64,
-    lru_cache: LruCache<usize, CachedChunk>,
-}
-
-struct ChunkCacher<'a, S> {
-    store: &'a mut S,
-    index: &'a DynamicIndexReader,
-}
-
-impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
-    fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
-        let info = match self.index.chunk_info(index) {
-            Some(info) => info,
-            None => bail!("chunk index out of range"),
-        };
-        let range = info.range;
-        let data = self.store.read_chunk(&info.digest)?;
-        CachedChunk::new(range, data).map(Some)
-    }
-}
-
-impl<S: ReadChunk> BufferedDynamicReader<S> {
-    pub fn new(index: DynamicIndexReader, store: S) -> Self {
-        let archive_size = index.index_bytes();
-        Self {
-            store,
-            index,
-            archive_size,
-            read_buffer: Vec::with_capacity(1024 * 1024),
-            buffered_chunk_idx: 0,
-            buffered_chunk_start: 0,
-            read_offset: 0,
-            lru_cache: LruCache::new(32),
-        }
-    }
-
-    pub fn archive_size(&self) -> u64 {
-        self.archive_size
-    }
-
-    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-        //let (start, end, data) = self.lru_cache.access(
-        let cached_chunk = self.lru_cache.access(
-            idx,
-            &mut ChunkCacher {
-                store: &mut self.store,
-                index: &self.index,
-            },
-        )?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
-
-        // fixme: avoid copy
-        self.read_buffer.clear();
-        self.read_buffer.extend_from_slice(&cached_chunk.data);
-
-        self.buffered_chunk_idx = idx;
-
-        self.buffered_chunk_start = cached_chunk.range.start;
-        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
-        Ok(())
-    }
-}
-
-impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
-    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
-        if offset == self.archive_size {
-            return Ok(&self.read_buffer[0..0]);
-        }
-
-        let buffer_len = self.read_buffer.len();
-        let index = &self.index;
-
-        // optimization for sequential read
-        if buffer_len > 0
-            && ((self.buffered_chunk_idx + 1) < index.index().len())
-            && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let next_idx = self.buffered_chunk_idx + 1;
-            let next_end = index.chunk_end(next_idx);
-            if offset < next_end {
-                self.buffer_chunk(next_idx)?;
-                let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-                return Ok(&self.read_buffer[buffer_offset..]);
-            }
-        }
-
-        if (buffer_len == 0)
-            || (offset < self.buffered_chunk_start)
-            || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let end_idx = index.index().len() - 1;
-            let end = index.chunk_end(end_idx);
-            let idx = index.binary_search(0, 0, end_idx, end, offset)?;
-            self.buffer_chunk(idx)?;
-        }
-
-        let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-        Ok(&self.read_buffer[buffer_offset..])
-    }
-}
-
-impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        use crate::tools::BufferedRead;
-        use std::io::{Error, ErrorKind};
-
-        let data = match self.buffered_read(self.read_offset) {
-            Ok(v) => v,
-            Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
-        };
-
-        let n = if data.len() > buf.len() {
-            buf.len()
-        } else {
-            data.len()
-        };
-
-        buf[0..n].copy_from_slice(&data[0..n]);
-
-        self.read_offset += n as u64;
-
-        Ok(n)
-    }
-}
-
-impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
-    fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
-        let new_offset = match pos {
-            SeekFrom::Start(start_offset) => start_offset as i64,
-            SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
-            SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
-        };
-
-        use std::io::{Error, ErrorKind};
-        if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
-            return Err(Error::new(
-                ErrorKind::Other,
-                format!(
-                    "seek is out of range {} ([0..{}])",
-                    new_offset, self.archive_size
-                ),
-            ));
-        }
-        self.read_offset = new_offset as u64;
-
-        Ok(self.read_offset)
-    }
-}
-
-/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
-/// async use!
-///
-/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
-/// so that we can properly access it from multiple threads simultaneously while not issuing
-/// duplicate simultaneous reads over http.
-#[derive(Clone)]
-pub struct LocalDynamicReadAt<R: ReadChunk> {
-    inner: Arc<Mutex<BufferedDynamicReader<R>>>,
-}
-
-impl<R: ReadChunk> LocalDynamicReadAt<R> {
-    pub fn new(inner: BufferedDynamicReader<R>) -> Self {
-        Self {
-            inner: Arc::new(Mutex::new(inner)),
-        }
-    }
-}
-
-impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
-    fn start_read_at<'a>(
-        self: Pin<&'a Self>,
-        _cx: &mut Context,
-        buf: &'a mut [u8],
-        offset: u64,
-    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
-        use std::io::Read;
-        MaybeReady::Ready(tokio::task::block_in_place(move || {
-            let mut reader = self.inner.lock().unwrap();
-            reader.seek(SeekFrom::Start(offset))?;
-            Ok(reader.read(buf)?)
-        }))
-    }
-
-    fn poll_complete<'a>(
-        self: Pin<&'a Self>,
-        _op: ReadAtOperation<'a>,
-    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
-        panic!("LocalDynamicReadAt::start_read_at returned Pending");
-    }
-}
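A minimal sketch of driving the reader above through its std::io impls (any `S: ReadChunk` store works; `read_exact` loops over the short, chunk-bounded reads that `read` produces):

    use std::io::{Read, Seek, SeekFrom};

    fn read_range<S: ReadChunk>(
        index: DynamicIndexReader,
        store: S,
        offset: u64,
        len: usize,
    ) -> Result<Vec<u8>, anyhow::Error> {
        let mut reader = BufferedDynamicReader::new(index, store);
        // Seek is bounds-checked against the archive size.
        reader.seek(SeekFrom::Start(offset))?;
        let mut buf = vec![0u8; len];
        reader.read_exact(&mut buf)?;
        Ok(buf)
    }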
@@ -85,10 +85,6 @@ pub use pbs_datastore::read_chunk::*;
 mod read_chunk;
 pub use read_chunk::*;
 
-// Split
-mod dynamic_index;
-pub use dynamic_index::*;
-
 mod datastore;
 pub use datastore::*;
 
@@ -64,7 +64,7 @@ use pbs_datastore::{CATALOG_NAME, CryptConfig, KeyConfig, decrypt_key, rsa_encry
 use pbs_datastore::backup_info::{BackupDir, BackupGroup};
 use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
 use pbs_datastore::chunk_store::verify_chunk_size;
-use pbs_datastore::dynamic_index::DynamicIndexReader;
+use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{
@@ -76,10 +76,6 @@ use pbs_tools::sync::StdChannelWriter;
 use pbs_tools::tokio::TokioWriterAdapter;
 use pbs_tools::json;
 
-use proxmox_backup::backup::{
-    BufferedDynamicReader,
-};
-
 mod proxmox_backup_client;
 use proxmox_backup_client::*;
 
@@ -13,19 +13,14 @@ use proxmox::api::router::ReturnType;
 use proxmox::sys::linux::tty;
 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
 
-use pbs_datastore::{KeyInfo, Kdf};
+use pbs_api_types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA};
+use pbs_datastore::{KeyConfig, KeyInfo, Kdf, rsa_decrypt_key_config};
+use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
 use pbs_client::tools::key_source::{
     find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
     place_default_encryption_key, place_default_master_pubkey,
 };
 
-
-use proxmox_backup::{
-    api2::types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA},
-    backup::{rsa_decrypt_key_config, KeyConfig},
-    tools::paperkey::{generate_paper_key, PaperkeyFormat},
-};
-
 #[api(
     input: {
         properties: {
@@ -17,20 +17,14 @@ use proxmox::{sortable, identity};
 use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
 use proxmox::tools::fd::Fd;
 
+use pbs_datastore::{BackupDir, BackupGroup, CryptConfig, load_and_decrypt_key};
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::dynamic_index::BufferedDynamicReader;
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_client::{BackupReader, RemoteChunkReader};
+use pbs_tools::json::required_string_param;
 
 use proxmox_backup::tools;
-use proxmox_backup::backup::{
-    load_and_decrypt_key,
-    CryptConfig,
-    IndexFile,
-    BackupDir,
-    BackupGroup,
-    BufferedDynamicReader,
-    CachedChunkReader,
-};
+use proxmox_backup::backup::CachedChunkReader;
 
 use crate::{
     REPO_URL_SCHEMA,
@@ -120,10 +114,10 @@ pub fn unmap_cmd_def() -> CliCommand {
 fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
     -> Vec<String>
 {
-    match tools::fuse_loop::find_all_mappings() {
+    match pbs_fuse_loop::find_all_mappings() {
         Ok(mappings) => mappings
             .filter_map(|(name, _)| {
-                tools::systemd::unescape_unit(&name).ok()
+                pbs_systemd::unescape_unit(&name).ok()
             }).collect(),
         Err(_) => Vec::new()
     }
@@ -144,7 +138,7 @@ fn mount(
 
     // Process should be daemonized.
     // Make sure to fork before the async runtime is instantiated to avoid troubles.
-    let (pr, pw) = proxmox_backup::tools::pipe()?;
+    let (pr, pw) = pbs_tools::io::pipe()?;
     match unsafe { fork() } {
         Ok(ForkResult::Parent { .. }) => {
             drop(pw);
@@ -284,9 +278,9 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
 
     let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
-    let name_escaped = tools::systemd::escape_unit(name, false);
+    let name_escaped = pbs_systemd::escape_unit(name, false);
 
-    let mut session = tools::fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
+    let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
     let loopdev = session.loopdev_path.clone();
 
     let (st_send, st_recv) = futures::channel::mpsc::channel(1);
@@ -343,10 +337,10 @@ fn unmap(
     let mut name = match param["name"].as_str() {
         Some(name) => name.to_owned(),
         None => {
-            tools::fuse_loop::cleanup_unused_run_files(None);
+            pbs_fuse_loop::cleanup_unused_run_files(None);
             let mut any = false;
-            for (backing, loopdev) in tools::fuse_loop::find_all_mappings()? {
-                let name = tools::systemd::unescape_unit(&backing)?;
+            for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
+                let name = pbs_systemd::unescape_unit(&backing)?;
                 println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
                 any = true;
             }
@@ -363,10 +357,10 @@
     }
 
     if name.starts_with("/dev/loop") {
-        tools::fuse_loop::unmap_loopdev(name)?;
+        pbs_fuse_loop::unmap_loopdev(name)?;
     } else {
-        let name = tools::systemd::escape_unit(&name, false);
-        tools::fuse_loop::unmap_name(name)?;
+        let name = pbs_systemd::escape_unit(&name, false);
+        pbs_fuse_loop::unmap_name(name)?;
     }
 
     Ok(Value::Null)
@@ -8,20 +8,12 @@ use proxmox::{
     tools::fs::file_get_contents,
 };
 
+use pbs_api_types::SnapshotListItem;
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_datastore::{BackupGroup, CryptMode, CryptConfig, decrypt_key};
+use pbs_datastore::data_blob::DataBlob;
 use pbs_tools::json::required_string_param;
 
-use proxmox_backup::{
-    api2::types::*,
-    backup::{
-        CryptMode,
-        CryptConfig,
-        DataBlob,
-        BackupGroup,
-        decrypt_key,
-    }
-};
-
 use crate::{
     REPO_URL_SCHEMA,
     KEYFILE_SCHEMA,
@@ -7,7 +7,7 @@ use pbs_client::display_task_log;
 use pbs_tools::percent_encoding::percent_encode_component;
 use pbs_tools::json::required_string_param;
 
-use proxmox_backup::api2::types::UPID_SCHEMA;
+use pbs_api_types::UPID;
 
 use crate::{
     REPO_URL_SCHEMA,
@@ -87,7 +87,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
             optional: true,
         },
         upid: {
-            schema: UPID_SCHEMA,
+            type: UPID,
         },
     }
 }
@@ -113,7 +113,7 @@ async fn task_log(param: Value) -> Result<Value, Error> {
             optional: true,
         },
         upid: {
-            schema: UPID_SCHEMA,
+            type: UPID,
        },
     }
 }
@@ -12,14 +12,9 @@ use proxmox::{
 };
 
 use pbs_datastore::Kdf;
+use pbs_datastore::paperkey::{PaperkeyFormat, generate_paper_key};
 
 use proxmox_backup::{
-    tools::{
-        paperkey::{
-            PaperkeyFormat,
-            generate_paper_key,
-        },
-    },
     config,
     api2::{
         self,
@@ -1,416 +0,0 @@
-//! Map a raw data reader as a loop device via FUSE
-
-use anyhow::{Error, format_err, bail};
-use std::ffi::OsStr;
-use std::path::{Path, PathBuf};
-use std::fs::{File, remove_file, read_to_string, OpenOptions};
-use std::io::SeekFrom;
-use std::io::prelude::*;
-use std::collections::HashMap;
-
-use nix::unistd::Pid;
-use nix::sys::signal::{self, Signal};
-
-use tokio::io::{AsyncRead, AsyncSeek, AsyncReadExt, AsyncSeekExt};
-use futures::stream::{StreamExt, TryStreamExt};
-use futures::channel::mpsc::{Sender, Receiver};
-
-use proxmox::const_regex;
-use proxmox::tools::time;
-use proxmox_fuse::{*, requests::FuseRequest};
-use super::loopdev;
-
-const RUN_DIR: &str = "/run/pbs-loopdev";
-
-const_regex! {
-    pub LOOPDEV_REGEX = r"^loop\d+$";
-}
-
-/// Represents an ongoing FUSE-session that has been mapped onto a loop device.
-/// Create with map_loop, then call 'main' and poll until startup_chan reports
-/// success. Then, daemonize or otherwise finish setup, and continue polling
-/// main's future until completion.
-pub struct FuseLoopSession<R: AsyncRead + AsyncSeek + Unpin> {
-    session: Option<Fuse>,
-    stat: libc::stat,
-    reader: R,
-    fuse_path: String,
-    pid_path: String,
-    pub loopdev_path: String,
-}
-
-impl<R: AsyncRead + AsyncSeek + Unpin> FuseLoopSession<R> {
-
-    /// Prepare for mapping the given reader as a block device node at
-    /// /dev/loopN. Creates a temporary file for FUSE and a PID file for unmap.
-    pub async fn map_loop<P: AsRef<str>>(size: u64, mut reader: R, name: P, options: &OsStr)
-        -> Result<Self, Error>
-    {
-        // attempt a single read to check if the reader is configured correctly
-        let _ = reader.read_u8().await?;
-
-        std::fs::create_dir_all(RUN_DIR)?;
-        let mut path = PathBuf::from(RUN_DIR);
-        path.push(name.as_ref());
-        let mut pid_path = path.clone();
-        pid_path.set_extension("pid");
-
-        // cleanup previous instance with same name
-        // if loopdev is actually still mapped, this will do nothing and the
-        // create_new below will fail as intended
-        cleanup_unused_run_files(Some(name.as_ref().to_owned()));
-
-        match OpenOptions::new().write(true).create_new(true).open(&path) {
-            Ok(_) => { /* file created, continue on */ },
-            Err(e) => {
-                if e.kind() == std::io::ErrorKind::AlreadyExists {
-                    bail!("the given archive is already mapped, cannot map twice");
-                } else {
-                    bail!("error while creating backing file ({:?}) - {}", &path, e);
-                }
-            },
-        }
-
-        let session = Fuse::builder("pbs-block-dev")?
-            .options_os(options)?
-            .enable_read()
-            .build()?
-            .mount(&path)?;
-
-        let loopdev_path = loopdev::get_or_create_free_dev().map_err(|err| {
-            format_err!("loop-control GET_FREE failed - {}", err)
-        })?;
-
-        // write pidfile so unmap can later send us a signal to exit
-        Self::write_pidfile(&pid_path)?;
-
-        Ok(Self {
-            session: Some(session),
-            reader,
-            stat: minimal_stat(size as i64),
-            fuse_path: path.to_string_lossy().into_owned(),
-            pid_path: pid_path.to_string_lossy().into_owned(),
-            loopdev_path,
-        })
-    }
-
-    fn write_pidfile(path: &Path) -> Result<(), Error> {
-        let pid = unsafe { libc::getpid() };
-        let mut file = File::create(path)?;
-        write!(file, "{}", pid)?;
-        Ok(())
-    }
-
-    /// Runs the FUSE request loop and assigns the loop device. Will send a
-    /// message on startup_chan once the loop device is assigned (or assignment
-    /// fails). Send a message on abort_chan to trigger cleanup and exit FUSE.
-    /// An error on loopdev assignment does *not* automatically close the FUSE
-    /// handle or do cleanup, trigger abort_chan manually in case startup fails.
-    pub async fn main(
-        &mut self,
-        mut startup_chan: Sender<Result<(), Error>>,
-        abort_chan: Receiver<()>,
-    ) -> Result<(), Error> {
-
-        if self.session.is_none() {
-            panic!("internal error: fuse_loop::main called before ::map_loop");
-        }
-        let mut session = self.session.take().unwrap().fuse();
-        let mut abort_chan = abort_chan.fuse();
-
-        let (loopdev_path, fuse_path) = (self.loopdev_path.clone(), self.fuse_path.clone());
-        tokio::task::spawn_blocking(move || {
-            if let Err(err) = loopdev::assign(loopdev_path, fuse_path) {
-                let _ = startup_chan.try_send(Err(format_err!("error while assigning loop device - {}", err)));
-            } else {
-                // device is assigned successfully, which means not only is the
-                // loopdev ready, but FUSE is also okay, since the assignment
-                // would have failed otherwise
-                let _ = startup_chan.try_send(Ok(()));
-            }
-        });
-
-        let (loopdev_path, fuse_path, pid_path) =
-            (self.loopdev_path.clone(), self.fuse_path.clone(), self.pid_path.clone());
-        let cleanup = |session: futures::stream::Fuse<Fuse>| {
-            // only warn for errors on cleanup, if these fail nothing is lost
-            if let Err(err) = loopdev::unassign(&loopdev_path) {
-                eprintln!(
-                    "cleanup: warning: could not unassign file {} from loop device {} - {}",
-                    &fuse_path,
-                    &loopdev_path,
-                    err,
-                );
-            }
-
-            // force close FUSE handle before attempting to remove backing file
-            std::mem::drop(session);
-
-            if let Err(err) = remove_file(&fuse_path) {
-                eprintln!(
-                    "cleanup: warning: could not remove temporary file {} - {}",
-                    &fuse_path,
-                    err,
-                );
-            }
-            if let Err(err) = remove_file(&pid_path) {
-                eprintln!(
-                    "cleanup: warning: could not remove PID file {} - {}",
-                    &pid_path,
-                    err,
-                );
-            }
-        };
-
-        loop {
-            tokio::select!{
-                _ = abort_chan.next() => {
-                    // aborted, do cleanup and exit
-                    break;
-                },
-                req = session.try_next() => {
-                    let res = match req? {
-                        Some(Request::Lookup(req)) => {
-                            let stat = self.stat;
-                            let entry = EntryParam::simple(stat.st_ino, stat);
-                            req.reply(&entry)
-                        },
-                        Some(Request::Getattr(req)) => {
-                            req.reply(&self.stat, std::f64::MAX)
-                        },
-                        Some(Request::Read(req)) => {
-                            match self.reader.seek(SeekFrom::Start(req.offset)).await {
-                                Ok(_) => {
-                                    let mut buf = vec![0u8; req.size];
-                                    match self.reader.read_exact(&mut buf).await {
-                                        Ok(_) => {
-                                            req.reply(&buf)
-                                        },
-                                        Err(e) => {
-                                            req.io_fail(e)
-                                        }
-                                    }
-                                },
-                                Err(e) => {
-                                    req.io_fail(e)
-                                }
-                            }
-                        },
-                        Some(_) => {
-                            // only FUSE requests necessary for loop-mapping are implemented
-                            eprintln!("Unimplemented FUSE request type encountered");
-                            Ok(())
-                        },
-                        None => {
-                            // FUSE connection closed
-                            break;
-                        }
-                    };
-                    if let Err(err) = res {
-                        // error during FUSE reply, cleanup and exit
-                        cleanup(session);
-                        bail!(err);
-                    }
-                }
-            }
-        }
-
-        // non-error FUSE exit
-        cleanup(session);
-        Ok(())
-    }
-}
-
-/// Clean up leftover files as well as FUSE instances without a loop device
-/// connected. Best effort, never returns an error.
-/// If filter_name is Some("..."), only this name will be cleaned up.
-pub fn cleanup_unused_run_files(filter_name: Option<String>) {
-    if let Ok(maps) = find_all_mappings() {
-        for (name, loopdev) in maps {
-            if loopdev.is_none() &&
-                (filter_name.is_none() || &name == filter_name.as_ref().unwrap())
-            {
-                let mut path = PathBuf::from(RUN_DIR);
-                path.push(&name);
-
-                // clean leftover FUSE instances (e.g. user called 'losetup -d' or similar)
-                // does nothing if files are already stagnant (e.g. instance crashed etc...)
-                if unmap_from_backing(&path, None).is_ok() {
-                    // we have reaped some leftover instance, tell the user
-                    eprintln!(
-                        "Cleaned up dangling mapping '{}': no loop device assigned",
-                        &name
-                    );
-                }
-
-                // remove remnant files
-                // these aren't doing anything, so no need to inform the user
-                let _ = remove_file(&path);
-                path.set_extension("pid");
-                let _ = remove_file(&path);
-            }
-        }
-    }
-}
-
-fn get_backing_file(loopdev: &str) -> Result<String, Error> {
-    let num = loopdev.split_at(9).1.parse::<u8>().map_err(|err|
-        format_err!("malformed loopdev path, does not end with valid number - {}", err))?;
-
-    let block_path = PathBuf::from(format!("/sys/devices/virtual/block/loop{}/loop/backing_file", num));
-    let backing_file = read_to_string(block_path).map_err(|err| {
-        if err.kind() == std::io::ErrorKind::NotFound {
-            format_err!("nothing mapped to {}", loopdev)
-        } else {
-            format_err!("error reading backing file - {}", err)
-        }
-    })?;
-
-    let backing_file = backing_file.trim();
-
-    if !backing_file.starts_with(RUN_DIR) {
-        bail!(
-            "loopdev {} is in use, but not by proxmox-backup-client (mapped to '{}')",
-            loopdev,
-            backing_file,
-        );
-    }
-
-    Ok(backing_file.to_owned())
-}
-
-// call in broken state: we found the mapping, but the client is already dead,
-// only thing to do is clean up what we can
-fn emerg_cleanup(loopdev: Option<&str>, mut backing_file: PathBuf) {
-    eprintln!(
-        "warning: found mapping with dead process ({:?}), attempting cleanup",
-        &backing_file
-    );
-
-    if let Some(loopdev) = loopdev {
-        let _ = loopdev::unassign(loopdev);
-    }
-
-    // killing the backing process does not cancel the FUSE mount automatically
-    let mut command = std::process::Command::new("fusermount");
-    command.arg("-u");
-    command.arg(&backing_file);
-    let _ = crate::tools::run_command(command, None);
-
-    let _ = remove_file(&backing_file);
-    backing_file.set_extension("pid");
-    let _ = remove_file(&backing_file);
-}
-
-fn unmap_from_backing(backing_file: &Path, loopdev: Option<&str>) -> Result<(), Error> {
-    let mut pid_path = PathBuf::from(backing_file);
-    pid_path.set_extension("pid");
-
-    let pid_str = read_to_string(&pid_path).map_err(|err| {
-        if err.kind() == std::io::ErrorKind::NotFound {
-            emerg_cleanup(loopdev, backing_file.to_owned());
-        }
-        format_err!("error reading pidfile {:?}: {}", &pid_path, err)
-    })?;
-    let pid = pid_str.parse::<i32>().map_err(|err|
-        format_err!("malformed PID ({}) in pidfile - {}", pid_str, err))?;
-
-    let pid = Pid::from_raw(pid);
-
-    // send SIGINT to trigger cleanup and exit in target process
-    match signal::kill(pid, Signal::SIGINT) {
-        Ok(()) => {},
-        Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
-            emerg_cleanup(loopdev, backing_file.to_owned());
-            return Ok(());
-        },
-        Err(e) => return Err(e.into()),
-    }
-
-    // block until unmap is complete or timeout
-    let start = time::epoch_i64();
-    loop {
-        match signal::kill(pid, None) {
-            Ok(_) => {
-                // 10 second timeout, then assume failure
-                if (time::epoch_i64() - start) > 10 {
-                    return Err(format_err!("timed out waiting for PID '{}' to exit", &pid));
-                }
-                std::thread::sleep(std::time::Duration::from_millis(100));
-            },
-            Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => {
-                break;
-            },
-            Err(e) => return Err(e.into()),
-        }
-    }
-
-    Ok(())
-}
-
-/// Returns an Iterator over a set of currently active mappings, i.e.
-/// FuseLoopSession instances. Returns ("backing-file-name", Some("/dev/loopX"))
-/// where .1 is None when a user has manually called 'losetup -d' or similar but
-/// the FUSE instance is still running.
-pub fn find_all_mappings() -> Result<impl Iterator<Item = (String, Option<String>)>, Error> {
-    // get map of all /dev/loop mappings belonging to us
-    let mut loopmap = HashMap::new();
-    for ent in pbs_tools::fs::scan_subdir(libc::AT_FDCWD, Path::new("/dev/"), &LOOPDEV_REGEX)? {
-        if let Ok(ent) = ent {
-            let loopdev = format!("/dev/{}", ent.file_name().to_string_lossy());
-            if let Ok(file) = get_backing_file(&loopdev) {
-                // insert filename only, strip RUN_DIR/
-                loopmap.insert(file[RUN_DIR.len()+1..].to_owned(), loopdev);
-            }
-        }
-    }
-
-    Ok(pbs_tools::fs::read_subdir(libc::AT_FDCWD, Path::new(RUN_DIR))?
-        .filter_map(move |ent| {
-            match ent {
-                Ok(ent) => {
-                    let file = ent.file_name().to_string_lossy();
-                    if file == "." || file == ".." || file.ends_with(".pid") {
-                        None
-                    } else {
-                        let loopdev = loopmap.get(file.as_ref()).map(String::to_owned);
-                        Some((file.into_owned(), loopdev))
-                    }
-                },
-                Err(_) => None,
-            }
-        }))
-}
-
-/// Try and unmap a running proxmox-backup-client instance from the given
-/// /dev/loopN device
-pub fn unmap_loopdev<S: AsRef<str>>(loopdev: S) -> Result<(), Error> {
-    let loopdev = loopdev.as_ref();
-    if loopdev.len() < 10 || !loopdev.starts_with("/dev/loop") {
-        bail!("malformed loopdev path, must be in format '/dev/loopX'");
-    }
-
-    let backing_file = get_backing_file(loopdev)?;
-    unmap_from_backing(Path::new(&backing_file), Some(loopdev))
-}
-
-/// Try and unmap a running proxmox-backup-client instance from the given name
-pub fn unmap_name<S: AsRef<str>>(name: S) -> Result<(), Error> {
-    for (mapping, loopdev) in find_all_mappings()? {
-        if mapping.ends_with(name.as_ref()) {
-            let mut path = PathBuf::from(RUN_DIR);
-            path.push(&mapping);
-            return unmap_from_backing(&path, loopdev.as_deref());
-        }
-    }
-    Err(format_err!("no mapping for name '{}' found", name.as_ref()))
-}
-
-fn minimal_stat(size: i64) -> libc::stat {
-    let mut stat: libc::stat = unsafe { std::mem::zeroed() };
-    stat.st_mode = libc::S_IFREG;
-    stat.st_ino = 1;
-    stat.st_nlink = 1;
-    stat.st_size = size;
-    stat
-}
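A minimal driver sketch for the session type above, wired the way its doc comments describe (the "ro" option string is an assumption; real callers poll the startup report and daemonize before driving main to completion):

    use std::ffi::OsStr;
    use futures::channel::mpsc;
    use tokio::io::{AsyncRead, AsyncSeek};

    async fn run_mapping<R>(size: u64, reader: R, name: &str) -> Result<(), anyhow::Error>
    where
        R: AsyncRead + AsyncSeek + Unpin,
    {
        let mut session =
            FuseLoopSession::map_loop(size, reader, name, OsStr::new("ro")).await?;
        println!("mapped at {}", session.loopdev_path);

        // main() ignores send errors on the startup channel, so this sketch does
        // not poll the receiver; real callers wait for the first Ok(()) on it.
        let (startup_tx, _startup_rx) = mpsc::channel(1);
        // Keep the abort sender alive: closing the channel would make main()
        // clean up and exit immediately.
        let (_abort_tx, abort_rx) = mpsc::channel(1);

        session.main(startup_tx, abort_rx).await
    }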
@@ -1,95 +0,0 @@
-//! Helpers to work with /dev/loop* devices
-
-use anyhow::Error;
-use std::fs::{File, OpenOptions};
-use std::path::Path;
-use std::os::unix::io::{RawFd, AsRawFd};
-
-const LOOP_CONTROL: &str = "/dev/loop-control";
-const LOOP_NAME: &str = "/dev/loop";
-
-/// Implements a subset of loop device ioctls necessary to assign and release
-/// a single file from a free loopdev.
-mod loop_ioctl {
-    use nix::{ioctl_none, ioctl_write_int_bad, ioctl_write_ptr_bad};
-
-    const LOOP_IOCTL: u16 = 0x4C; // 'L'
-    const LOOP_SET_FD: u16 = 0x00;
-    const LOOP_CLR_FD: u16 = 0x01;
-    const LOOP_SET_STATUS64: u16 = 0x04;
-
-    const LOOP_CTRL_GET_FREE: u16 = 0x82;
-
-    ioctl_write_int_bad!(ioctl_set_fd, (LOOP_IOCTL << 8) | LOOP_SET_FD);
-    ioctl_none!(ioctl_clr_fd, LOOP_IOCTL, LOOP_CLR_FD);
-    ioctl_none!(ioctl_ctrl_get_free, LOOP_IOCTL, LOOP_CTRL_GET_FREE);
-    ioctl_write_ptr_bad!(ioctl_set_status64, (LOOP_IOCTL << 8) | LOOP_SET_STATUS64, LoopInfo64);
-
-    pub const LO_FLAGS_READ_ONLY: u32 = 1;
-    pub const LO_FLAGS_PARTSCAN: u32 = 8;
-
-    const LO_NAME_SIZE: usize = 64;
-    const LO_KEY_SIZE: usize = 32;
-
-    #[repr(C)]
-    pub struct LoopInfo64 {
-        pub lo_device: u64,
-        pub lo_inode: u64,
-        pub lo_rdevice: u64,
-        pub lo_offset: u64,
-        pub lo_sizelimit: u64,
-        pub lo_number: u32,
-        pub lo_encrypt_type: u32,
-        pub lo_encrypt_key_size: u32,
-        pub lo_flags: u32,
-        pub lo_file_name: [u8; LO_NAME_SIZE],
-        pub lo_crypt_name: [u8; LO_NAME_SIZE],
-        pub lo_encrypt_key: [u8; LO_KEY_SIZE],
-        pub lo_init: [u64; 2],
-    }
-}
-
-// ioctl helpers create public fns, do not export them outside the module
-// users should use the wrapper functions below
-use loop_ioctl::*;
-
-/// Use the GET_FREE ioctl to get or add a free loop device, of which the
-/// /dev/loopN path will be returned. This is inherently racy because of the
-/// delay between this and calling assign, but since assigning is atomic it
-/// does not matter much and will simply cause assign to fail.
-pub fn get_or_create_free_dev() -> Result<String, Error> {
-    let ctrl_file = File::open(LOOP_CONTROL)?;
-    let free_num = unsafe { ioctl_ctrl_get_free(ctrl_file.as_raw_fd())? };
-    let loop_file_path = format!("{}{}", LOOP_NAME, free_num);
-    Ok(loop_file_path)
-}
-
-fn assign_dev(fd: RawFd, backing_fd: RawFd) -> Result<(), Error> {
-    unsafe { ioctl_set_fd(fd, backing_fd)?; }
-
-    // set required read-only flag and partscan for convenience
-    let mut info: LoopInfo64 = unsafe { std::mem::zeroed() };
-    info.lo_flags = LO_FLAGS_READ_ONLY | LO_FLAGS_PARTSCAN;
-    unsafe { ioctl_set_status64(fd, &info)?; }
-
-    Ok(())
-}
-
-/// Open the next available /dev/loopN file and assign the given path to
-/// it as its backing file in read-only mode.
-pub fn assign<P: AsRef<Path>>(loop_dev: P, backing: P) -> Result<(), Error> {
-    let loop_file = File::open(loop_dev)?;
-    let backing_file = OpenOptions::new()
-        .read(true)
-        .open(backing)?;
-    assign_dev(loop_file.as_raw_fd(), backing_file.as_raw_fd())?;
-    Ok(())
-}
-
-/// Unassign any file descriptors currently attached to the given
-/// /dev/loopN device.
-pub fn unassign<P: AsRef<Path>>(path: P) -> Result<(), Error> {
-    let loop_file = File::open(path)?;
-    unsafe { ioctl_clr_fd(loop_file.as_raw_fd())?; }
-    Ok(())
-}
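A combined sketch of the helpers above (requires permission to open the loop-control and loop device nodes; the backing path is hypothetical):

    fn map_file_readonly(backing: &str) -> Result<String, anyhow::Error> {
        // Ask /dev/loop-control for a free slot, then attach the file read-only.
        let loopdev = get_or_create_free_dev()?;
        assign(loopdev.as_str(), backing)?;
        Ok(loopdev)
    }

    fn detach(loopdev: &str) -> Result<(), anyhow::Error> {
        // Drop whatever is currently attached to the device again.
        unassign(loopdev)
    }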
@@ -30,20 +30,17 @@ pub mod config;
 pub mod cpio;
 pub mod daemon;
 pub mod disks;
-pub mod fuse_loop;
 
 mod memcom;
 pub use memcom::Memcom;
 
 pub mod logrotate;
-pub mod loopdev;
 pub mod serde_filter;
 pub mod statistics;
 pub mod subscription;
 pub mod systemd;
 pub mod ticket;
 pub mod sgutils2;
-pub mod paperkey;
 
 pub mod parallel_handler;
 pub use parallel_handler::ParallelHandler;
@@ -54,16 +51,6 @@ pub use file_logger::{FileLogger, FileLogOptions};
 pub use pbs_tools::broadcast_future::{BroadcastData, BroadcastFuture};
 pub use pbs_tools::ops::ControlFlow;
 
-/// The `BufferedRead` trait provides a single function
-/// `buffered_read`. It returns a reference to an internal buffer. The
-/// purpose of this trait is to avoid unnecessary data copies.
-pub trait BufferedRead {
-    /// This function tries to fill the internal buffers, then
-    /// returns a reference to the available data. It returns an empty
-    /// buffer if `offset` points to the end of the file.
-    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
-}
-
 /// Shortcut for md5 sums.
 pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
     hash(MessageDigest::md5(), data).map_err(Error::from)
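A consumer sketch for the trait removed here (implementors like BufferedDynamicReader hand out slices into their own buffer, so the caller copies at most once):

    fn copy_all<B: BufferedRead, W: std::io::Write>(
        reader: &mut B,
        mut out: W,
    ) -> Result<(), anyhow::Error> {
        let mut offset = 0u64;
        loop {
            // Borrow the reader's internal buffer instead of copying into our own.
            let data = reader.buffered_read(offset)?;
            if data.is_empty() {
                // An empty slice marks the end of the file.
                return Ok(());
            }
            out.write_all(data)?;
            offset += data.len() as u64;
        }
    }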
@@ -174,13 +161,6 @@ pub fn fail_on_shutdown() -> Result<(), Error> {
     Ok(())
 }
 
-/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
-/// descriptors.
-pub fn pipe() -> Result<(Fd, Fd), Error> {
-    let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
-    Ok((Fd(pin), Fd(pout)))
-}
-
 /// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
 /// descriptors.
 pub fn socketpair() -> Result<(Fd, Fd), Error> {
@@ -1,253 +0,0 @@
-use std::io::Write;
-use std::process::{Command, Stdio};
-
-use anyhow::{bail, format_err, Error};
-use serde::{Deserialize, Serialize};
-
-use proxmox::api::api;
-
-use crate::backup::KeyConfig;
-
-#[api()]
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Paperkey output format
-pub enum PaperkeyFormat {
-    /// Format as Utf8 text. Includes QR codes as ascii-art.
-    Text,
-    /// Format as Html. Includes QR codes as SVG images.
-    Html,
-}
-
-/// Generate a paper key (html or utf8 text)
-///
-/// This function takes an encryption key (either RSA private key
-/// text, or `KeyConfig` json), and generates a printable text or html
-/// page, including a scannable QR code to recover the key.
-pub fn generate_paper_key<W: Write>(
-    output: W,
-    data: &str,
-    subject: Option<String>,
-    output_format: Option<PaperkeyFormat>,
-) -> Result<(), Error> {
-    let (data, is_master_key) = if data.starts_with("-----BEGIN ENCRYPTED PRIVATE KEY-----\n")
-        || data.starts_with("-----BEGIN RSA PRIVATE KEY-----\n")
-    {
-        let data = data.trim_end();
-        if !(data.ends_with("\n-----END ENCRYPTED PRIVATE KEY-----")
-            || data.ends_with("\n-----END RSA PRIVATE KEY-----"))
-        {
-            bail!("unexpected key format");
-        }
-
-        let lines: Vec<String> = data
-            .lines()
-            .map(|s| s.trim_end())
-            .filter(|s| !s.is_empty())
-            .map(String::from)
-            .collect();
-
-        if lines.len() < 20 {
-            bail!("unexpected key format");
-        }
-
-        (lines, true)
-    } else {
-        match serde_json::from_str::<KeyConfig>(&data) {
-            Ok(key_config) => {
-                let lines = serde_json::to_string_pretty(&key_config)?
-                    .lines()
-                    .map(String::from)
-                    .collect();
-
-                (lines, false)
-            }
-            Err(err) => {
-                eprintln!("Couldn't parse data as KeyConfig - {}", err);
-                bail!("Neither a PEM-formatted private key, nor a PBS key file.");
-            }
-        }
-    };
-
-    let format = output_format.unwrap_or(PaperkeyFormat::Html);
-
-    match format {
-        PaperkeyFormat::Html => paperkey_html(output, &data, subject, is_master_key),
-        PaperkeyFormat::Text => paperkey_text(output, &data, subject, is_master_key),
-    }
-}
-
-fn paperkey_html<W: Write>(
-    mut output: W,
-    lines: &[String],
-    subject: Option<String>,
-    is_master: bool,
-) -> Result<(), Error> {
-    let img_size_pt = 500;
-
-    writeln!(output, "<!DOCTYPE html>")?;
-    writeln!(output, "<html lang=\"en\">")?;
-    writeln!(output, "<head>")?;
-    writeln!(output, "<meta charset=\"utf-8\">")?;
-    writeln!(
-        output,
-        "<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">"
-    )?;
-    writeln!(output, "<title>Proxmox Backup Paperkey</title>")?;
-    writeln!(output, "<style type=\"text/css\">")?;
-
-    writeln!(output, " p {{")?;
-    writeln!(output, " font-size: 12pt;")?;
-    writeln!(output, " font-family: monospace;")?;
-    writeln!(output, " white-space: pre-wrap;")?;
-    writeln!(output, " line-break: anywhere;")?;
-    writeln!(output, " }}")?;
-
-    writeln!(output, "</style>")?;
-
-    writeln!(output, "</head>")?;
-
-    writeln!(output, "<body>")?;
-
-    if let Some(subject) = subject {
-        writeln!(output, "<p>Subject: {}</p>", subject)?;
-    }
-
-    if is_master {
-        const BLOCK_SIZE: usize = 20;
-
-        for (block_nr, block) in lines.chunks(BLOCK_SIZE).enumerate() {
-            writeln!(
-                output,
-                "<div style=\"page-break-inside: avoid;page-break-after: always\">"
-            )?;
-            writeln!(output, "<p>")?;
-
-            for (i, line) in block.iter().enumerate() {
-                writeln!(output, "{:02}: {}", i + block_nr * BLOCK_SIZE, line)?;
-            }
-
-            writeln!(output, "</p>")?;
-
-            let qr_code = generate_qr_code("svg", block)?;
-            let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
-
-            writeln!(output, "<center>")?;
-            writeln!(output, "<img")?;
-            writeln!(
-                output,
-                "width=\"{}pt\" height=\"{}pt\"",
-                img_size_pt, img_size_pt
-            )?;
-            writeln!(output, "src=\"data:image/svg+xml;base64,{}\"/>", qr_code)?;
-            writeln!(output, "</center>")?;
-            writeln!(output, "</div>")?;
-        }
-
-        writeln!(output, "</body>")?;
-        writeln!(output, "</html>")?;
-        return Ok(());
-    }
-
-    writeln!(output, "<div style=\"page-break-inside: avoid\">")?;
-
-    writeln!(output, "<p>")?;
-
-    writeln!(output, "-----BEGIN PROXMOX BACKUP KEY-----")?;
-
-    for line in lines {
-        writeln!(output, "{}", line)?;
-    }
-
-    writeln!(output, "-----END PROXMOX BACKUP KEY-----")?;
-
-    writeln!(output, "</p>")?;
-
-    let qr_code = generate_qr_code("svg", lines)?;
-    let qr_code = base64::encode_config(&qr_code, base64::STANDARD_NO_PAD);
-
-    writeln!(output, "<center>")?;
-    writeln!(output, "<img")?;
-    writeln!(
-        output,
-        "width=\"{}pt\" height=\"{}pt\"",
-        img_size_pt, img_size_pt
-    )?;
-    writeln!(output, "src=\"data:image/svg+xml;base64,{}\"/>", qr_code)?;
-    writeln!(output, "</center>")?;
-
-    writeln!(output, "</div>")?;
-
-    writeln!(output, "</body>")?;
-    writeln!(output, "</html>")?;
-
-    Ok(())
-}
-
-fn paperkey_text<W: Write>(
-    mut output: W,
-    lines: &[String],
-    subject: Option<String>,
-    is_private: bool,
-) -> Result<(), Error> {
-    if let Some(subject) = subject {
-        writeln!(output, "Subject: {}\n", subject)?;
-    }
-
-    if is_private {
-        const BLOCK_SIZE: usize = 5;
-
-        for (block_nr, block) in lines.chunks(BLOCK_SIZE).enumerate() {
-            for (i, line) in block.iter().enumerate() {
-                writeln!(output, "{:-2}: {}", i + block_nr * BLOCK_SIZE, line)?;
-            }
-            let qr_code = generate_qr_code("utf8i", block)?;
-            let qr_code = String::from_utf8(qr_code)
-                .map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
-            writeln!(output, "{}", qr_code)?;
-            writeln!(output, "{}", char::from(12u8))?; // page break
-        }
-        return Ok(());
-    }
-
-    writeln!(output, "-----BEGIN PROXMOX BACKUP KEY-----")?;
-    for line in lines {
-        writeln!(output, "{}", line)?;
-    }
-    writeln!(output, "-----END PROXMOX BACKUP KEY-----")?;
-
-    let qr_code = generate_qr_code("utf8i", &lines)?;
-    let qr_code = String::from_utf8(qr_code)
-        .map_err(|_| format_err!("Failed to read qr code (got non-utf8 data)"))?;
-
-    writeln!(output, "{}", qr_code)?;
-
-    Ok(())
-}
-
-fn generate_qr_code(output_type: &str, lines: &[String]) -> Result<Vec<u8>, Error> {
-    let mut child = Command::new("qrencode")
-        .args(&["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
-        .stdin(Stdio::piped())
-        .stdout(Stdio::piped())
-        .spawn()?;
-
-    {
-        let stdin = child
-            .stdin
-            .as_mut()
-            .ok_or_else(|| format_err!("Failed to open stdin"))?;
-        let data = lines.join("\n");
-        stdin
-            .write_all(data.as_bytes())
-            .map_err(|_| format_err!("Failed to write to stdin"))?;
-    }
-
-    let output = child
-        .wait_with_output()
-        .map_err(|_| format_err!("Failed to read stdout"))?;
-
-    let output = crate::tools::command_output(output, None)?;
-
-    Ok(output)
-}
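A minimal invocation sketch for the moved generator (it shells out to the external qrencode binary; the key JSON, subject line, and output path are placeholders):

    fn write_paperkey_html(key_config_json: &str) -> Result<(), anyhow::Error> {
        let mut out = Vec::new();
        generate_paper_key(
            &mut out,
            key_config_json,
            Some("backup key".to_string()), // hypothetical subject line
            Some(PaperkeyFormat::Html),
        )?;
        std::fs::write("paperkey.html", &out)?;
        Ok(())
    }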