move remaining client tools to pbs-tools/datastore

pbs-datastore ended up depending on tokio after all, but that's fine
for now

for the fuse code I added pbs-fuse-loop (containing the old fuse_loop
and its 'loopdev' module); ultimately only binaries should depend on
this, to avoid the extra library link elsewhere

the only things remaining to move out of the client binary are the
api method return types; those will need to be moved to
pbs-api-types...

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Wolfgang Bumiller, 2021-07-21 14:12:22 +02:00
commit eb5e0ae65a (parent bbc71e3b02)
25 changed files with 353 additions and 126 deletions


@@ -23,6 +23,7 @@ members = [
"pbs-buildcfg",
"pbs-client",
"pbs-datastore",
"pbs-fuse-loop",
"pbs-runtime",
"pbs-systemd",
"pbs-tools",
@@ -90,7 +91,6 @@ pxar = { version = "0.10.1", features = [ "tokio-io" ] }
proxmox = { version = "0.12.0", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
proxmox-acme-rs = "0.2.1"
proxmox-apt = "0.5.1"
proxmox-fuse = "0.1.1"
proxmox-http = { version = "0.3.0", features = [ "client", "http-helpers", "websocket" ] }
proxmox-openid = "0.6.1"
@@ -98,6 +98,7 @@ pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-datastore = { path = "pbs-datastore" }
pbs-fuse-loop = { path = "pbs-fuse-loop" }
pbs-runtime = { path = "pbs-runtime" }
pbs-systemd = { path = "pbs-systemd" }
pbs-tools = { path = "pbs-tools" }


@@ -35,6 +35,7 @@ SUBCRATES := \
pbs-buildcfg \
pbs-client \
pbs-datastore \
pbs-fuse-loop \
pbs-runtime \
pbs-systemd \
pbs-tools \


@@ -8,8 +8,9 @@ description = "general API type helpers for PBS"
[dependencies]
anyhow = "1.0"
lazy_static = "1.4"
nix = "0.19.1"
libc = "0.2"
nix = "0.19.1"
openssl = "0.10"
regex = "1.2"
serde = { version = "1.0", features = ["derive"] }


@@ -525,3 +525,42 @@ pub struct Counts {
/// The counts for other backup types
pub other: Option<TypeCounts>,
}
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(1)
.max_length(64)
.schema();
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
/// Hex-encoded RSA modulus
pub modulus: String,
/// Key (modulus) length in bits
pub length: usize,
}
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
let modulus = value.n().to_hex_str()?.to_string();
let exponent = value.e().to_dec_str()?.to_string();
let length = value.size() as usize * 8;
Ok(Self {
path: None,
exponent,
modulus,
length,
})
}
}
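[Note: a minimal sketch of how this conversion can be consumed; the PEM parsing entry point is openssl's public_key_from_pem, and the path value is a hypothetical example, not part of this diff:]

use std::convert::TryFrom;

// Sketch: derive display information from a PEM-encoded RSA public key.
fn pubkey_info(pem: &[u8]) -> Result<RsaPubKeyInfo, anyhow::Error> {
    let rsa = openssl::rsa::Rsa::public_key_from_pem(pem)?;
    let mut info = RsaPubKeyInfo::try_from(rsa)?;
    // hypothetical origin path, purely illustrative
    info.path = Some("master-public.pem".to_string());
    Ok(info)
}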


@@ -101,7 +101,7 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
}
}
impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
impl<S: ReadChunk> pbs_tools::io::BufferedRead for BufferedDynamicReader<S> {
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
if offset == self.archive_size {
return Ok(&self.read_buffer[0..0]);
@@ -141,7 +141,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
use crate::tools::BufferedRead;
use pbs_tools::io::BufferedRead;
use std::io::{Error, ErrorKind};
let data = match self.buffered_read(self.read_offset) {


@@ -11,6 +11,7 @@ use pbs_tools::cert::CertInfo;
use pbs_tools::auth::private_auth_key;
pub mod catalog_shell;
pub mod dynamic_index;
pub mod pxar;
pub mod tools;


@@ -7,6 +7,7 @@ description = "low level pbs data storage access"
[dependencies]
anyhow = "1.0"
base64 = "0.12"
crc32fast = "1"
endian_trait = { version = "0.6", features = [ "arrays" ] }
libc = "0.2"
@@ -15,10 +16,11 @@ nix = "0.19.1"
openssl = "0.10"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.6", features = [] }
zstd = { version = "0.6", features = [ "bindgen" ] }
pathpatterns = "0.1.2"
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
pxar = "0.10.1"
proxmox = { version = "0.12.0", default-features = false, features = [ "api-macro" ] }


@@ -1,15 +1,20 @@
use std::fs::File;
use std::io::{BufWriter, Seek, SeekFrom, Write};
use std::ops::Range;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;
use anyhow::{bail, format_err, Error};
use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
use proxmox::tools::mmap::Mmap;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_tools::lru_cache::LruCache;
use pbs_tools::process_locker::ProcessLockSharedGuard;
use crate::Chunker;
@@ -18,6 +23,7 @@ use crate::chunk_store::ChunkStore;
use crate::data_blob::{DataBlob, DataChunkBuilder};
use crate::file_formats;
use crate::index::{IndexFile, ChunkReadInfo};
use crate::read_chunk::ReadChunk;
/// Header format definition for dynamic index files (`.didx`)
#[repr(C)]
@@ -506,3 +512,219 @@ impl Write for DynamicChunkWriter {
))
}
}
struct CachedChunk {
range: Range<u64>,
data: Vec<u8>,
}
impl CachedChunk {
/// Perform sanity checks on the range and data size:
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
if data.len() as u64 != range.end - range.start {
bail!(
"read chunk with wrong size ({} != {})",
data.len(),
range.end - range.start,
);
}
Ok(Self { range, data })
}
}
pub struct BufferedDynamicReader<S> {
store: S,
index: DynamicIndexReader,
archive_size: u64,
read_buffer: Vec<u8>,
buffered_chunk_idx: usize,
buffered_chunk_start: u64,
read_offset: u64,
lru_cache: LruCache<usize, CachedChunk>,
}
struct ChunkCacher<'a, S> {
store: &'a mut S,
index: &'a DynamicIndexReader,
}
impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
let info = match self.index.chunk_info(index) {
Some(info) => info,
None => bail!("chunk index out of range"),
};
let range = info.range;
let data = self.store.read_chunk(&info.digest)?;
CachedChunk::new(range, data).map(Some)
}
}
impl<S: ReadChunk> BufferedDynamicReader<S> {
pub fn new(index: DynamicIndexReader, store: S) -> Self {
let archive_size = index.index_bytes();
Self {
store,
index,
archive_size,
read_buffer: Vec::with_capacity(1024 * 1024),
buffered_chunk_idx: 0,
buffered_chunk_start: 0,
read_offset: 0,
lru_cache: LruCache::new(32),
}
}
pub fn archive_size(&self) -> u64 {
self.archive_size
}
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
//let (start, end, data) = self.lru_cache.access(
let cached_chunk = self.lru_cache.access(
idx,
&mut ChunkCacher {
store: &mut self.store,
index: &self.index,
},
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
// fixme: avoid copy
self.read_buffer.clear();
self.read_buffer.extend_from_slice(&cached_chunk.data);
self.buffered_chunk_idx = idx;
self.buffered_chunk_start = cached_chunk.range.start;
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
Ok(())
}
}
impl<S: ReadChunk> pbs_tools::io::BufferedRead for BufferedDynamicReader<S> {
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
if offset == self.archive_size {
return Ok(&self.read_buffer[0..0]);
}
let buffer_len = self.read_buffer.len();
let index = &self.index;
// optimization for sequential read
if buffer_len > 0
&& ((self.buffered_chunk_idx + 1) < index.index().len())
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
{
let next_idx = self.buffered_chunk_idx + 1;
let next_end = index.chunk_end(next_idx);
if offset < next_end {
self.buffer_chunk(next_idx)?;
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
return Ok(&self.read_buffer[buffer_offset..]);
}
}
if (buffer_len == 0)
|| (offset < self.buffered_chunk_start)
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
{
let end_idx = index.index().len() - 1;
let end = index.chunk_end(end_idx);
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
self.buffer_chunk(idx)?;
}
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
Ok(&self.read_buffer[buffer_offset..])
}
}
impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
use pbs_tools::io::BufferedRead;
use std::io::{Error, ErrorKind};
let data = match self.buffered_read(self.read_offset) {
Ok(v) => v,
Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
};
let n = if data.len() > buf.len() {
buf.len()
} else {
data.len()
};
buf[0..n].copy_from_slice(&data[0..n]);
self.read_offset += n as u64;
Ok(n)
}
}
impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
let new_offset = match pos {
SeekFrom::Start(start_offset) => start_offset as i64,
SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
};
use std::io::{Error, ErrorKind};
if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
return Err(Error::new(
ErrorKind::Other,
format!(
"seek is out of range {} ([0..{}])",
new_offset, self.archive_size
),
));
}
self.read_offset = new_offset as u64;
Ok(self.read_offset)
}
}
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
#[derive(Clone)]
pub struct LocalDynamicReadAt<R: ReadChunk> {
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
}
impl<R: ReadChunk> LocalDynamicReadAt<R> {
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
Self {
inner: Arc::new(Mutex::new(inner)),
}
}
}
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
fn start_read_at<'a>(
self: Pin<&'a Self>,
_cx: &mut Context,
buf: &'a mut [u8],
offset: u64,
) -> MaybeReady<std::io::Result<usize>, ReadAtOperation<'a>> {
use std::io::Read;
MaybeReady::Ready(tokio::task::block_in_place(move || {
let mut reader = self.inner.lock().unwrap();
reader.seek(SeekFrom::Start(offset))?;
Ok(reader.read(buf)?)
}))
}
fn poll_complete<'a>(
self: Pin<&'a Self>,
_op: ReadAtOperation<'a>,
) -> MaybeReady<std::io::Result<usize>, ReadAtOperation<'a>> {
panic!("LocalDynamicReadAt::start_read_at returned Pending");
}
}
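[Note: a minimal sketch (hypothetical helper) of how these pieces compose; since `LocalDynamicReadAt` calls `tokio::task::block_in_place`, it must run on a multi-threaded tokio runtime:]

use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::read_chunk::ReadChunk;

// Sketch: wrap a blocking chunk source behind the async `ReadAt` interface
// consumed by the pxar accessor.
fn to_read_at<S: ReadChunk>(index: DynamicIndexReader, store: S) -> LocalDynamicReadAt<S> {
    LocalDynamicReadAt::new(BufferedDynamicReader::new(index, store))
}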


@@ -195,6 +195,7 @@ pub mod file_formats;
pub mod index;
pub mod key_derivation;
pub mod manifest;
pub mod paperkey;
pub mod prune;
pub mod read_chunk;
pub mod store_progress;


@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
use proxmox::api::api;
use crate::backup::KeyConfig;
use crate::KeyConfig;
#[api()]
#[derive(Debug, Serialize, Deserialize)]
@@ -247,7 +247,7 @@ fn generate_qr_code(output_type: &str, lines: &[String]) -> Result<Vec<u8>, Erro
.wait_with_output()
.map_err(|_| format_err!("Failed to read stdout"))?;
let output = crate::tools::command_output(output, None)?;
let output = pbs_tools::command_output(output, None)?;
Ok(output)
}

pbs-fuse-loop/Cargo.toml (new file)

@@ -0,0 +1,20 @@
[package]
name = "pbs-fuse-loop"
version = "0.1.0"
authors = ["Proxmox Support Team <support@proxmox.com>"]
edition = "2018"
description = "fuse and loop device helpers"
[dependencies]
anyhow = "1.0"
futures = "0.3"
lazy_static = "1.4"
libc = "0.2"
nix = "0.19.1"
regex = "1.2"
tokio = { version = "1.6", features = [] }
proxmox = "0.12.0"
proxmox-fuse = "0.1.1"
pbs-tools = { path = "../pbs-tools" }


@@ -295,7 +295,7 @@ fn emerg_cleanup (loopdev: Option<&str>, mut backing_file: PathBuf) {
let mut command = std::process::Command::new("fusermount");
command.arg("-u");
command.arg(&backing_file);
let _ = crate::tools::run_command(command, None);
let _ = pbs_tools::run_command(command, None);
let _ = remove_file(&backing_file);
backing_file.set_extension("pid");

pbs-fuse-loop/src/lib.rs (new file)

@@ -0,0 +1,5 @@
pub mod loopdev;
mod fuse_loop;
pub use fuse_loop::*;
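[Note: a minimal usage sketch (hypothetical function), mirroring how the mount binary below consumes this crate:]

// Sketch: list all active fuse_loop mappings with their loop devices.
fn list_mappings() -> Result<(), anyhow::Error> {
    for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
        let name = pbs_systemd::unescape_unit(&backing)?;
        println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
    }
    Ok(())
}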


@@ -1,10 +1,11 @@
//! Helpers to work with /dev/loop* devices
use anyhow::Error;
use std::fs::{File, OpenOptions};
use std::path::Path;
use std::os::unix::io::{RawFd, AsRawFd};
use anyhow::Error;
const LOOP_CONTROL: &str = "/dev/loop-control";
const LOOP_NAME: &str = "/dev/loop";

pbs-tools/src/io.rs (new file)

@@ -0,0 +1,22 @@
//! I/O utilities.
use anyhow::Error;
use proxmox::tools::fd::Fd;
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
/// purpose of this trait is to avoid unnecessary data copies.
pub trait BufferedRead {
/// This function tries to fill the internal buffers, then
/// returns a reference to the available data. It returns an empty
/// buffer if `offset` points to the end of the file.
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}
/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
/// descriptors.
pub fn pipe() -> Result<(Fd, Fd), Error> {
let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
Ok((Fd(pin), Fd(pout)))
}
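[Note: a minimal sketch of the fork-then-signal pattern this helper exists for (see the mount command below); it assumes proxmox's `Fd` guard implements `AsRawFd`, and the one-byte ready protocol is illustrative:]

use std::os::unix::io::AsRawFd;
use nix::unistd::{fork, ForkResult};

// Sketch: fork before the async runtime starts; the parent blocks on the
// pipe until the daemonized child signals readiness.
fn daemonize() -> Result<(), anyhow::Error> {
    let (pr, pw) = pbs_tools::io::pipe()?;
    match unsafe { fork() }? {
        ForkResult::Parent { .. } => {
            drop(pw);
            let mut buf = [0u8; 1];
            nix::unistd::read(pr.as_raw_fd(), &mut buf)?;
            Ok(())
        }
        ForkResult::Child => {
            drop(pr);
            // ... initialize, then signal readiness with a single byte
            nix::unistd::write(pw.as_raw_fd(), &[0u8])?;
            Ok(())
        }
    }
}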


@@ -7,6 +7,7 @@ pub mod cert;
pub mod compression;
pub mod format;
pub mod fs;
pub mod io;
pub mod json;
pub mod lru_cache;
pub mod nom;


@@ -933,44 +933,6 @@ pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
.schema();
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(1)
.max_length(64)
.schema();
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
/// Hex-encoded RSA modulus
pub modulus: String,
/// Key (modulus) length in bits
pub length: usize,
}
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
let modulus = value.n().to_hex_str()?.to_string();
let exponent = value.e().to_dec_str()?.to_string();
let length = value.size() as usize * 8;
Ok(Self {
path: None,
exponent,
modulus,
length,
})
}
}
#[api(
properties: {
"next-run": {


@@ -85,10 +85,6 @@ pub use pbs_datastore::read_chunk::*;
mod read_chunk;
pub use read_chunk::*;
// Split
mod dynamic_index;
pub use dynamic_index::*;
mod datastore;
pub use datastore::*;


@@ -64,7 +64,7 @@ use pbs_datastore::{CATALOG_NAME, CryptConfig, KeyConfig, decrypt_key, rsa_encry
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
@@ -76,10 +76,6 @@ use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use pbs_tools::json;
use proxmox_backup::backup::{
BufferedDynamicReader,
};
mod proxmox_backup_client;
use proxmox_backup_client::*;


@@ -13,19 +13,14 @@ use proxmox::api::router::ReturnType;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use pbs_datastore::{KeyInfo, Kdf};
use pbs_api_types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA};
use pbs_datastore::{KeyConfig, KeyInfo, Kdf, rsa_decrypt_key_config};
use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
use pbs_client::tools::key_source::{
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
place_default_encryption_key, place_default_master_pubkey,
};
use proxmox_backup::{
api2::types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA},
backup::{rsa_decrypt_key_config, KeyConfig},
tools::paperkey::{generate_paper_key, PaperkeyFormat},
};
#[api(
input: {
properties: {


@@ -17,20 +17,14 @@ use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
use proxmox::tools::fd::Fd;
use pbs_datastore::{BackupDir, BackupGroup, CryptConfig, load_and_decrypt_key};
use pbs_datastore::index::IndexFile;
use pbs_datastore::dynamic_index::BufferedDynamicReader;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::json::required_string_param;
use proxmox_backup::tools;
use proxmox_backup::backup::{
load_and_decrypt_key,
CryptConfig,
IndexFile,
BackupDir,
BackupGroup,
BufferedDynamicReader,
CachedChunkReader,
};
use proxmox_backup::backup::CachedChunkReader;
use crate::{
REPO_URL_SCHEMA,
@@ -120,10 +114,10 @@ pub fn unmap_cmd_def() -> CliCommand {
fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
-> Vec<String>
{
match tools::fuse_loop::find_all_mappings() {
match pbs_fuse_loop::find_all_mappings() {
Ok(mappings) => mappings
.filter_map(|(name, _)| {
tools::systemd::unescape_unit(&name).ok()
pbs_systemd::unescape_unit(&name).ok()
}).collect(),
Err(_) => Vec::new()
}
@@ -144,7 +138,7 @@ fn mount(
// Process should be daemonized.
// Make sure to fork before the async runtime is instantiated to avoid troubles.
let (pr, pw) = proxmox_backup::tools::pipe()?;
let (pr, pw) = pbs_tools::io::pipe()?;
match unsafe { fork() } {
Ok(ForkResult::Parent { .. }) => {
drop(pw);
@@ -284,9 +278,9 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name);
let name_escaped = tools::systemd::escape_unit(name, false);
let name_escaped = pbs_systemd::escape_unit(name, false);
let mut session = tools::fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
let loopdev = session.loopdev_path.clone();
let (st_send, st_recv) = futures::channel::mpsc::channel(1);
@@ -343,10 +337,10 @@ fn unmap(
let mut name = match param["name"].as_str() {
Some(name) => name.to_owned(),
None => {
tools::fuse_loop::cleanup_unused_run_files(None);
pbs_fuse_loop::cleanup_unused_run_files(None);
let mut any = false;
for (backing, loopdev) in tools::fuse_loop::find_all_mappings()? {
let name = tools::systemd::unescape_unit(&backing)?;
for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
let name = pbs_systemd::unescape_unit(&backing)?;
println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
any = true;
}
@@ -363,10 +357,10 @@ }
}
if name.starts_with("/dev/loop") {
tools::fuse_loop::unmap_loopdev(name)?;
pbs_fuse_loop::unmap_loopdev(name)?;
} else {
let name = tools::systemd::escape_unit(&name, false);
tools::fuse_loop::unmap_name(name)?;
let name = pbs_systemd::escape_unit(&name, false);
pbs_fuse_loop::unmap_name(name)?;
}
Ok(Value::Null)


@@ -8,20 +8,12 @@ use proxmox::{
tools::fs::file_get_contents,
};
use pbs_api_types::SnapshotListItem;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_datastore::{BackupGroup, CryptMode, CryptConfig, decrypt_key};
use pbs_datastore::data_blob::DataBlob;
use pbs_tools::json::required_string_param;
use proxmox_backup::{
api2::types::*,
backup::{
CryptMode,
CryptConfig,
DataBlob,
BackupGroup,
decrypt_key,
}
};
use crate::{
REPO_URL_SCHEMA,
KEYFILE_SCHEMA,


@@ -7,7 +7,7 @@ use pbs_client::display_task_log;
use pbs_tools::percent_encoding::percent_encode_component;
use pbs_tools::json::required_string_param;
use proxmox_backup::api2::types::UPID_SCHEMA;
use pbs_api_types::UPID;
use crate::{
REPO_URL_SCHEMA,
@@ -87,7 +87,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
optional: true,
},
upid: {
schema: UPID_SCHEMA,
type: UPID,
},
}
}
@@ -113,7 +113,7 @@ async fn task_log(param: Value) -> Result<Value, Error> {
optional: true,
},
upid: {
schema: UPID_SCHEMA,
type: UPID,
},
}
}


@@ -12,14 +12,9 @@ use proxmox::{
};
use pbs_datastore::Kdf;
use pbs_datastore::paperkey::{PaperkeyFormat, generate_paper_key};
use proxmox_backup::{
tools::{
paperkey::{
PaperkeyFormat,
generate_paper_key,
},
},
config,
api2::{
self,


@@ -30,20 +30,17 @@ pub mod config;
pub mod cpio;
pub mod daemon;
pub mod disks;
pub mod fuse_loop;
mod memcom;
pub use memcom::Memcom;
pub mod logrotate;
pub mod loopdev;
pub mod serde_filter;
pub mod statistics;
pub mod subscription;
pub mod systemd;
pub mod ticket;
pub mod sgutils2;
pub mod paperkey;
pub mod parallel_handler;
pub use parallel_handler::ParallelHandler;
@@ -54,16 +51,6 @@ pub use file_logger::{FileLogger, FileLogOptions};
pub use pbs_tools::broadcast_future::{BroadcastData, BroadcastFuture};
pub use pbs_tools::ops::ControlFlow;
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
/// purpose of this traid is to avoid unnecessary data copies.
pub trait BufferedRead {
/// This functions tries to fill the internal buffers, then
/// returns a reference to the available data. It returns an empty
/// buffer if `offset` points to the end of the file.
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>;
}
/// Shortcut for md5 sums.
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
hash(MessageDigest::md5(), data).map_err(Error::from)
@@ -174,13 +161,6 @@ pub fn fail_on_shutdown() -> Result<(), Error> {
Ok(())
}
/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
/// descriptors.
pub fn pipe() -> Result<(Fd, Fd), Error> {
let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
Ok((Fd(pin), Fd(pout)))
}
/// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
/// descriptors.
pub fn socketpair() -> Result<(Fd, Fd), Error> {