move client to pbs-client subcrate

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Wolfgang Bumiller
2021-07-19 10:50:18 +02:00
parent 72fbe9ffa5
commit 2b7f8dd5ea
74 changed files with 802 additions and 753 deletions
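The pattern repeated throughout the hunks below is a pure import-path move: code that previously lived under proxmox_backup::client (and proxmox_backup::pxar) is now pulled in from the new pbs-client subcrate. A minimal before/after sketch of that pattern, using only type and function names taken from the diff:

// Before: client types were re-exported by the main crate.
// use proxmox_backup::client::{BackupRepository, HttpClient, HttpClientOptions};

// After: the same types come from the dedicated subcrate; helpers such as
// file-name completion moved from proxmox_backup::tools to pbs_tools::fs.
use pbs_client::{BackupRepository, HttpClient, HttpClientOptions};
use pbs_tools::fs::complete_file_name;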


@ -10,7 +10,11 @@ use proxmox::api::router::SubdirMap;
use proxmox::api::schema::{Schema, StringSchema};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
use pbs_api_types::{
PASSWORD_FORMAT, PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, Authid,
Tokenname, UserWithTokens, Userid,
};
use crate::config::user;
use crate::config::token_shadow;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
@ -22,77 +26,16 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: user::ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: user::EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: user::FIRST_NAME_SCHEMA,
},
lastname: {
schema: user::LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: user::EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: user::ApiToken
},
},
}
)]
#[derive(Serialize,Deserialize)]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if="Vec::is_empty", default)]
pub tokens: Vec<user::ApiToken>,
}
impl UserWithTokens {
fn new(user: user::User) -> Self {
Self {
userid: user.userid,
comment: user.comment,
enable: user.enable,
expire: user.expire,
firstname: user.firstname,
lastname: user.lastname,
email: user.email,
tokens: Vec::new(),
}
fn new_user_with_tokens(user: user::User) -> UserWithTokens {
UserWithTokens {
userid: user.userid,
comment: user.comment,
enable: user.enable,
expire: user.expire,
firstname: user.firstname,
lastname: user.lastname,
email: user.email,
tokens: Vec::new(),
}
}
@ -165,13 +108,13 @@ pub fn list_users(
});
iter
.map(|user: user::User| {
let mut user = UserWithTokens::new(user);
let mut user = new_user_with_tokens(user);
user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
user
})
.collect()
} else {
iter.map(UserWithTokens::new)
iter.map(new_user_with_tokens)
.collect()
};


@ -26,13 +26,14 @@ use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
use pxar::accessor::aio::Accessor;
use pxar::EntryKind;
use pbs_client::pxar::create_zip;
use crate::api2::types::*;
use crate::api2::node::rrd::create_value_from_rrd;
use crate::api2::helpers;
use crate::backup::*;
use crate::config::datastore;
use crate::config::cached_user_info::CachedUserInfo;
use crate::pxar::create_zip;
use crate::server::{jobstate::Job, WorkerTask};
use crate::tools::{


@ -13,6 +13,7 @@ use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use pbs_tools::fs::lock_dir_noblock_shared;
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use crate::tools;
use crate::server::{WorkerTask, H2Service};


@ -6,8 +6,9 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::http_err;
use proxmox::tools::fs::open_file_locked;
use pbs_client::{HttpClient, HttpClientOptions};
use crate::api2::types::*;
use crate::client::{HttpClient, HttpClientOptions};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::remote;
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};


@ -7,9 +7,10 @@ use futures::{select, future::FutureExt};
use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
use pbs_client::{HttpClient, BackupRepository};
use crate::server::{WorkerTask, jobstate::Job, pull::pull_store};
use crate::backup::DataStore;
use crate::client::{HttpClient, BackupRepository};
use crate::api2::types::{
DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, Authid,
};


@ -28,6 +28,7 @@ use proxmox::{
};
use pbs_tools::fs::lock_dir_noblock_shared;
use pbs_datastore::PROXMOX_BACKUP_READER_PROTOCOL_ID_V1;
use crate::{
api2::{


@ -107,14 +107,6 @@ pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
.format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \
SHA256 digest. This can be used to prevent concurrent \
modifications."
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();
pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
.format(&CHUNK_DIGEST_FORMAT)
.schema();

File diff suppressed because it is too large.


@ -5,20 +5,6 @@ use anyhow::{bail, Error};
// Note: .pcat1 => Proxmox Catalog Format version 1
pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
#[macro_export]
macro_rules! PROXMOX_BACKUP_PROTOCOL_ID_V1 {
() => {
"proxmox-backup-protocol-v1"
};
}
#[macro_export]
macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {
() => {
"proxmox-backup-reader-protocol-v1"
};
}
/// Unix system user used by proxmox-backup-proxy
pub const BACKUP_USER_NAME: &str = "backup";
/// Unix system group used by proxmox-backup-proxy
@ -102,9 +88,5 @@ pub use datastore::*;
mod verify;
pub use verify::*;
// Move to client
mod catalog_shell;
pub use catalog_shell::*;
mod cached_chunk_reader;
pub use cached_chunk_reader::*;
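The two protocol-ID macros deleted above were not dropped: as the hunks earlier in this commit show (`use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;`), they now live in the pbs_datastore crate. A short sketch of the new call sites, with the expansions copied from the removed definitions:

use pbs_datastore::{PROXMOX_BACKUP_PROTOCOL_ID_V1, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};

// Both macros still expand to the same protocol strings as before.
let writer_proto: &str = PROXMOX_BACKUP_PROTOCOL_ID_V1!();        // "proxmox-backup-protocol-v1"
let reader_proto: &str = PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(); // "proxmox-backup-reader-protocol-v1"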


@ -3,7 +3,7 @@ use anyhow::{Error};
use proxmox::api::format::*;
use proxmox::api::cli::*;
use proxmox_backup::backup::catalog_shell_cli;
use pbs_client::catalog_shell::catalog_shell_cli;
fn main() -> Result<(), Error> {


@ -27,47 +27,24 @@ use proxmox::{
};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_datastore::catalog::BackupCatalogWriter;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::backup::{
archive_type,
decrypt_key,
rsa_encrypt_key_config,
verify_chunk_size,
ArchiveType,
AsyncReadChunk,
BackupDir,
BackupGroup,
BackupManifest,
BufferedDynamicReader,
CATALOG_NAME,
CatalogReader,
CatalogWriter,
ChunkStream,
CryptConfig,
CryptMode,
DynamicIndexReader,
ENCRYPTED_KEY_BLOB_NAME,
FixedChunkStream,
FixedIndexReader,
KeyConfig,
IndexFile,
MANIFEST_BLOB_NAME,
Shell,
PruneOptions,
use pbs_api_types::CryptMode;
use pbs_client::{
BACKUP_SOURCE_SCHEMA,
BackupReader,
BackupRepository,
BackupSpecificationType,
BackupStats,
BackupWriter,
HttpClient,
PxarBackupStream,
RemoteChunkReader,
UploadOptions,
delete_ticket_info,
parse_backup_specification,
view_task_result,
};
use proxmox_backup::tools;
mod proxmox_backup_client;
use proxmox_backup_client::*;
pub mod proxmox_client_tools;
use proxmox_client_tools::{
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
complete_img_archive_name, complete_pxar_archive_name, complete_repository, connect,
@ -78,6 +55,37 @@ use proxmox_client_tools::{
},
CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
};
use pbs_datastore::CryptConfig;
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::catalog::BackupCatalogWriter;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, archive_type};
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::backup::{
decrypt_key,
rsa_encrypt_key_config,
verify_chunk_size,
BufferedDynamicReader,
CATALOG_NAME,
CatalogReader,
CatalogWriter,
ChunkStream,
ENCRYPTED_KEY_BLOB_NAME,
FixedChunkStream,
KeyConfig,
PruneOptions,
};
use proxmox_backup::tools;
mod proxmox_backup_client;
use proxmox_backup_client::*;
fn record_repository(repo: &BackupRepository) {
@ -172,7 +180,7 @@ async fn backup_directory<P: AsRef<Path>>(
archive_name: &str,
chunk_size: Option<usize>,
catalog: Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter>>>>,
pxar_create_options: proxmox_backup::pxar::PxarCreateOptions,
pxar_create_options: pbs_client::pxar::PxarCreateOptions,
upload_options: UploadOptions,
) -> Result<BackupStats, Error> {
@ -589,7 +597,7 @@ fn spawn_catalog_upload(
type: Integer,
description: "Max number of entries to hold in memory.",
optional: true,
default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
},
"verbose": {
type: Boolean,
@ -633,7 +641,7 @@ async fn create_backup(
let include_dev = param["include-dev"].as_array();
let entries_max = param["entries-max"].as_u64()
.unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
.unwrap_or(pbs_client::pxar::ENCODER_MAX_ENTRIES as u64);
let empty = Vec::new();
let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
@ -856,7 +864,7 @@ async fn create_backup(
println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let pxar_options = proxmox_backup::pxar::PxarCreateOptions {
let pxar_options = pbs_client::pxar::PxarCreateOptions {
device_set: devices.clone(),
patterns: pattern_list.clone(),
entries_max: entries_max as usize,
@ -1168,7 +1176,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
let options = proxmox_backup::pxar::PxarExtractOptions {
let options = pbs_client::pxar::PxarExtractOptions {
match_list: &[],
extract_match_default: true,
allow_existing_dirs,
@ -1176,10 +1184,10 @@ async fn restore(param: Value) -> Result<Value, Error> {
};
if let Some(target) = target {
proxmox_backup::pxar::extract_archive(
pbs_client::pxar::extract_archive(
pxar::decoder::Decoder::from_std(reader)?,
Path::new(target),
proxmox_backup::pxar::Flags::DEFAULT,
pbs_client::pxar::Flags::DEFAULT,
|path| {
if verbose {
println!("{:?}", path);
@ -1377,7 +1385,6 @@ async fn status(param: Value) -> Result<Value, Error> {
Ok(Value::Null)
}
use proxmox_backup::client::RemoteChunkReader;
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
@ -1424,13 +1431,13 @@ fn main() {
.arg_param(&["backupspec"])
.completion_cb("repository", complete_repository)
.completion_cb("backupspec", complete_backup_source)
.completion_cb("keyfile", tools::complete_file_name)
.completion_cb("master-pubkey-file", tools::complete_file_name)
.completion_cb("keyfile", pbs_tools::fs::complete_file_name)
.completion_cb("master-pubkey-file", pbs_tools::fs::complete_file_name)
.completion_cb("chunk-size", complete_chunk_size);
let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
.completion_cb("repository", complete_repository)
.completion_cb("keyfile", tools::complete_file_name);
.completion_cb("keyfile", pbs_tools::fs::complete_file_name);
let list_cmd_def = CliCommand::new(&API_METHOD_LIST_BACKUP_GROUPS)
.completion_cb("repository", complete_repository);
@ -1443,7 +1450,7 @@ fn main() {
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_archive_name)
.completion_cb("target", tools::complete_file_name);
.completion_cb("target", pbs_tools::fs::complete_file_name);
let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE)
.arg_param(&["group"])


@ -6,12 +6,12 @@ use serde_json::{json, Value};
use proxmox::api::{api, cli::*, RpcEnvironment};
use pbs_client::{connect_to_localhost, display_task_log, view_task_result};
use pbs_tools::percent_encoding::percent_encode_component;
use proxmox_backup::tools;
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use proxmox_backup::client::*;
use proxmox_backup::server::wait_for_local_worker;
mod proxmox_backup_manager;


@ -16,18 +16,10 @@ use proxmox::api::{
use pxar::accessor::aio::Accessor;
use pxar::decoder::aio::Decoder;
use proxmox_backup::api2::{helpers, types::ArchiveEntry};
use proxmox_backup::backup::{
decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
DirEntryAttribute, IndexFile, LocalDynamicReadAt, CATALOG_NAME,
};
use proxmox_backup::client::{BackupReader, RemoteChunkReader};
use proxmox_backup::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
use proxmox_backup::tools;
// use "pub" so rust doesn't complain about "unused" functions in the module
pub mod proxmox_client_tools;
use proxmox_client_tools::{
use pbs_datastore::index::IndexFile;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
use pbs_client::tools::{
complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
key_source::{
crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
@ -36,6 +28,13 @@ use proxmox_client_tools::{
REPO_URL_SCHEMA,
};
use proxmox_backup::api2::{helpers, types::ArchiveEntry};
use proxmox_backup::backup::{
decrypt_key, BackupDir, BufferedDynamicReader, CatalogReader, CryptConfig, CryptMode,
DirEntryAttribute, LocalDynamicReadAt, CATALOG_NAME,
};
use proxmox_backup::tools;
mod proxmox_file_restore;
use proxmox_file_restore::*;
@ -456,7 +455,7 @@ fn main() {
.arg_param(&["snapshot", "path", "target"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("target", tools::complete_file_name);
.completion_cb("target", pbs_tools::fs::complete_file_name);
let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
@ -476,3 +475,15 @@ fn main() {
Some(|future| pbs_runtime::main(future)),
);
}
/// Returns a runtime dir owned by the current user.
/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
let uid = nix::unistd::Uid::current();
let mut path: std::path::PathBuf = pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
path.push(uid.to_string());
tools::create_run_dir()?;
std::fs::create_dir_all(&path)?;
Ok(path)
}
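A hedged usage sketch for the helper added above; joining RESTORE_VM_MAP onto the returned directory mirrors how block_driver_qemu (which imports get_user_run_dir later in this commit) plausibly uses it, though the exact call site is an assumption:

// Yields e.g. /run/proxmox-backup/1000/restore-vm-map.json for uid 1000.
let map_path = get_user_run_dir()?.join("restore-vm-map.json");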


@ -13,8 +13,9 @@ use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use pbs_client::DEFAULT_VSOCK_PORT;
use proxmox::api::RpcEnvironmentType;
use proxmox_backup::client::DEFAULT_VSOCK_PORT;
use proxmox_backup::server::{rest::*, ApiConfig};
mod proxmox_restore_daemon;


@ -14,6 +14,7 @@ use proxmox::{
},
};
use pbs_client::{connect_to_localhost, view_task_result};
use pbs_tools::format::{
HumanByte,
render_epoch,
@ -21,10 +22,6 @@ use pbs_tools::format::{
};
use proxmox_backup::{
client::{
connect_to_localhost,
view_task_result,
},
api2::{
self,
types::{


@ -18,6 +18,9 @@ use proxmox::api::{
router::ReturnType,
};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use proxmox_backup::backup::{
load_and_decrypt_key,
CryptConfig,
@ -25,8 +28,6 @@ use proxmox_backup::backup::{
DataChunkBuilder,
};
use proxmox_backup::client::*;
use crate::{
KEYFILE_SCHEMA, REPO_URL_SCHEMA,
extract_repository_from_value,
@ -34,8 +35,6 @@ use crate::{
connect,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api()]
#[derive(Copy, Clone, Serialize)]
/// Speed test result


@ -7,9 +7,10 @@ use serde_json::Value;
use proxmox::api::{api, cli::*};
use proxmox_backup::tools;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use proxmox_backup::client::*;
use proxmox_backup::tools;
use crate::{
REPO_URL_SCHEMA,
@ -37,8 +38,6 @@ use crate::{
Shell,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api(
input: {
properties: {
@ -219,9 +218,9 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader: proxmox_backup::pxar::fuse::Reader =
let reader: pbs_client::pxar::fuse::Reader =
Arc::new(BufferedDynamicReadAt::new(reader));
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
client.download(CATALOG_NAME, &mut tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)


@ -14,19 +14,18 @@ use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use pbs_datastore::{KeyInfo, Kdf};
use pbs_client::tools::key_source::{
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
place_default_encryption_key, place_default_master_pubkey,
};
use proxmox_backup::{
api2::types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA},
backup::{rsa_decrypt_key_config, KeyConfig},
tools,
tools::paperkey::{generate_paper_key, PaperkeyFormat},
};
use crate::proxmox_client_tools::key_source::{
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
place_default_encryption_key, place_default_master_pubkey,
};
#[api(
input: {
properties: {
@ -458,35 +457,35 @@ fn paper_key(
pub fn cli() -> CliCommandMap {
let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_import_with_master_key_cmd_def = CliCommand::new(&API_METHOD_IMPORT_WITH_MASTER_KEY)
.arg_param(&["master-keyfile"])
.completion_cb("master-keyfile", tools::complete_file_name)
.completion_cb("master-keyfile", pbs_tools::fs::complete_file_name)
.arg_param(&["encrypted-keyfile"])
.completion_cb("encrypted-keyfile", tools::complete_file_name)
.completion_cb("encrypted-keyfile", pbs_tools::fs::complete_file_name)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_show_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_SHOW_MASTER_PUBKEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let key_show_cmd_def = CliCommand::new(&API_METHOD_SHOW_KEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
let paper_key_cmd_def = CliCommand::new(&API_METHOD_PAPER_KEY)
.arg_param(&["path"])
.completion_cb("path", tools::complete_file_name);
.completion_cb("path", pbs_tools::fs::complete_file_name);
CliCommandMap::new()
.insert("create", key_create_cmd_def)


@ -17,6 +17,9 @@ use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
use proxmox::tools::fd::Fd;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use proxmox_backup::tools;
use proxmox_backup::backup::{
load_and_decrypt_key,
@ -28,8 +31,6 @@ use proxmox_backup::backup::{
CachedChunkReader,
};
use proxmox_backup::client::*;
use crate::{
REPO_URL_SCHEMA,
extract_repository_from_value,
@ -43,8 +44,6 @@ use crate::{
BufferedDynamicReadAt,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&mount),
@ -98,7 +97,7 @@ pub fn mount_cmd_def() -> CliCommand {
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_pxar_archive_name)
.completion_cb("target", tools::complete_file_name)
.completion_cb("target", pbs_tools::fs::complete_file_name)
}
pub fn map_cmd_def() -> CliCommand {
@ -257,11 +256,11 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader: proxmox_backup::pxar::fuse::Reader =
let reader: pbs_client::pxar::fuse::Reader =
Arc::new(BufferedDynamicReadAt::new(reader));
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
let session = proxmox_backup::pxar::fuse::Session::mount(
let session = pbs_client::pxar::fuse::Session::mount(
decoder,
&options,
false,


@ -8,6 +8,8 @@ use proxmox::{
tools::fs::file_get_contents,
};
use pbs_client::tools::key_source::get_encryption_key_password;
use proxmox_backup::{
tools,
api2::types::*,
@ -35,8 +37,6 @@ use crate::{
record_repository,
};
use crate::proxmox_client_tools::key_source::get_encryption_key_password;
#[api(
input: {
properties: {
@ -412,8 +412,8 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
CliCommand::new(&API_METHOD_UPLOAD_LOG)
.arg_param(&["snapshot", "logfile"])
.completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("logfile", tools::complete_file_name)
.completion_cb("keyfile", tools::complete_file_name)
.completion_cb("logfile", pbs_tools::fs::complete_file_name)
.completion_cb("keyfile", pbs_tools::fs::complete_file_name)
.completion_cb("repository", complete_repository)
)
}


@ -4,10 +4,10 @@ use serde_json::{json, Value};
use proxmox::api::{api, cli::*};
use pbs_tools::percent_encoding::percent_encode_component;
use pbs_client::display_task_log;
use proxmox_backup::tools;
use proxmox_backup::client::*;
use proxmox_backup::api2::types::UPID_SCHEMA;
use crate::{


@ -3,12 +3,10 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use pbs_client::{connect_to_localhost, view_task_result};
use proxmox_backup::config;
use proxmox_backup::api2::{self, types::* };
use proxmox_backup::client::{
connect_to_localhost,
view_task_result,
};
use proxmox_backup::config::datastore::DIR_NAME_SCHEMA;
#[api(


@ -1,585 +0,0 @@
use std::convert::TryFrom;
use std::path::PathBuf;
use std::os::unix::io::{FromRawFd, RawFd};
use std::io::Read;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox::api::schema::*;
use proxmox::sys::linux::tty;
use proxmox::tools::fs::file_get_contents;
use proxmox_backup::backup::CryptMode;
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const DEFAULT_MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
pub const KEYFILE_SCHEMA: Schema =
StringSchema::new("Path to encryption key. All data will be encrypted using this key.")
.schema();
pub const KEYFD_SCHEMA: Schema =
IntegerSchema::new("Pass an encryption key via an already opened file descriptor.")
.minimum(0)
.schema();
pub const MASTER_PUBKEY_FILE_SCHEMA: Schema = StringSchema::new(
"Path to master public key. The encryption key used for a backup will be encrypted using this key and appended to the backup.")
.schema();
pub const MASTER_PUBKEY_FD_SCHEMA: Schema =
IntegerSchema::new("Pass a master public key via an already opened file descriptor.")
.minimum(0)
.schema();
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum KeySource {
DefaultKey,
Fd,
Path(String),
}
pub fn format_key_source(source: &KeySource, key_type: &str) -> String {
match source {
KeySource::DefaultKey => format!("Using default {} key..", key_type),
KeySource::Fd => format!("Using {} key from file descriptor..", key_type),
KeySource::Path(path) => format!("Using {} key from '{}'..", key_type, path),
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeyWithSource {
pub source: KeySource,
pub key: Vec<u8>,
}
impl KeyWithSource {
pub fn from_fd(key: Vec<u8>) -> Self {
Self {
source: KeySource::Fd,
key,
}
}
pub fn from_default(key: Vec<u8>) -> Self {
Self {
source: KeySource::DefaultKey,
key,
}
}
pub fn from_path(path: String, key: Vec<u8>) -> Self {
Self {
source: KeySource::Path(path),
key,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct CryptoParams {
pub mode: CryptMode,
pub enc_key: Option<KeyWithSource>,
// FIXME switch to openssl::rsa::rsa<openssl::pkey::Public> once that is Eq?
pub master_pubkey: Option<KeyWithSource>,
}
pub fn crypto_parameters(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, false)
}
pub fn crypto_parameters_keep_fd(param: &Value) -> Result<CryptoParams, Error> {
do_crypto_parameters(param, true)
}
fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoParams, Error> {
let keyfile = match param.get("keyfile") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --keyfile parameter type"),
None => None,
};
let key_fd = match param.get("keyfd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --keyfd parameter type"),
None => None,
};
let master_pubkey_file = match param.get("master-pubkey-file") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --master-pubkey-file parameter type"),
None => None,
};
let master_pubkey_fd = match param.get("master-pubkey-fd") {
Some(Value::Number(key_fd)) => Some(
RawFd::try_from(key_fd
.as_i64()
.ok_or_else(|| format_err!("bad master public key fd: {:?}", key_fd))?
)
.map_err(|err| format_err!("bad public master key fd: {:?}: {}", key_fd, err))?
),
Some(_) => bail!("bad --master-pubkey-fd parameter type"),
None => None,
};
let mode: Option<CryptMode> = match param.get("crypt-mode") {
Some(mode) => Some(serde_json::from_value(mode.clone())?),
None => None,
};
let key = match (keyfile, key_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let mut input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = input.read_to_end(&mut data).map_err(|err| {
format_err!("error reading encryption key from fd {}: {}", fd, err)
})?;
if keep_keyfd_open {
// don't close fd if requested, and try to reset seek position
std::mem::forget(input);
unsafe { libc::lseek(fd, 0, libc::SEEK_SET); }
}
Some(KeyWithSource::from_fd(data))
}
};
let master_pubkey = match (master_pubkey_file, master_pubkey_fd) {
(None, None) => None,
(Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
(Some(keyfile), None) => Some(KeyWithSource::from_path(
keyfile.clone(),
file_get_contents(keyfile)?,
)),
(None, Some(fd)) => {
let input = unsafe { std::fs::File::from_raw_fd(fd) };
let mut data = Vec::new();
let _len: usize = { input }
.read_to_end(&mut data)
.map_err(|err| format_err!("error reading master key from fd {}: {}", fd, err))?;
Some(KeyWithSource::from_fd(data))
}
};
let res = match mode {
// no crypt mode, enable encryption if keys are available
None => match (key, master_pubkey) {
// only default keys if available
(None, None) => match read_optional_default_encryption_key()? {
None => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
enc_key => {
let master_pubkey = read_optional_default_master_pubkey()?;
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit master key, default enc key needed
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--master-pubkey-file/--master-pubkey-fd specified, but no key available"),
enc_key => {
CryptoParams {
mode: CryptMode::Encrypt,
enc_key,
master_pubkey,
}
},
},
// explicit keyfile, maybe default master key
(enc_key, None) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey: read_optional_default_master_pubkey()? },
// explicit keyfile and master key
(enc_key, master_pubkey) => CryptoParams { mode: CryptMode::Encrypt, enc_key, master_pubkey },
},
// explicitly disabled encryption
Some(CryptMode::None) => match (key, master_pubkey) {
// no keys => OK, no encryption
(None, None) => CryptoParams { mode: CryptMode::None, enc_key: None, master_pubkey: None },
// --keyfile and --crypt-mode=none
(Some(_), _) => bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive"),
// --master-pubkey-file and --crypt-mode=none
(_, Some(_)) => bail!("--master-pubkey-file/--master-pubkey-fd and --crypt-mode=none are mutually exclusive"),
},
// explicitly enabled encryption
Some(mode) => match (key, master_pubkey) {
// no key, maybe master key
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
enc_key => {
eprintln!("Encrypting with default encryption key!");
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams {
mode,
enc_key,
master_pubkey,
}
},
},
// --keyfile and --crypt-mode other than none
(enc_key, master_pubkey) => {
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
};
CryptoParams { mode, enc_key, master_pubkey }
},
},
};
Ok(res)
}
pub fn find_default_master_pubkey() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn place_default_master_pubkey() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_MASTER_PUBKEY_FILE_NAME,
"default master public key file",
)
}
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
super::find_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
super::place_xdg_file(
DEFAULT_ENCRYPTION_KEY_FILE_NAME,
"default encryption key file",
)
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
find_default_encryption_key()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(not(test))]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
find_default_master_pubkey()?
.map(|path| file_get_contents(path).map(KeyWithSource::from_default))
.transpose()
}
#[cfg(test)]
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_ENCRYPTION_KEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_ENCRYPTION_KEY = value;
}
#[cfg(test)]
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
#[cfg(test)]
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
// not safe when multiple concurrent test cases end up here!
unsafe {
match &TEST_DEFAULT_MASTER_PUBKEY {
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
Ok(None) => Ok(None),
Err(_) => bail!("test error"),
}
}
}
#[cfg(test)]
// not safe when multiple concurrent test cases end up here!
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
TEST_DEFAULT_MASTER_PUBKEY = value;
}
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods
use std::env::VarError::*;
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
Ok(p) => return Ok(p.as_bytes().to_vec()),
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
Err(NotPresent) => {
// Try another method
}
}
// If we're on a TTY, query the user for a password
if tty::stdin_isatty() {
return Ok(tty::read_password("Encryption Key Password: ")?);
}
bail!("no password input mechanism available");
}
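A hedged sketch of the fallback order implemented above, assuming a test-style caller: the environment variable is consulted first, and a TTY prompt is only reached when it is unset.

// Non-interactive callers provide the passphrase via the environment;
// without it, an attached TTY is the only remaining input method.
std::env::set_var("PBS_ENCRYPTION_PASSWORD", "secret");
assert_eq!(get_encryption_key_password()?, b"secret".to_vec());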
#[test]
// WARNING: there must only be one test for crypto_parameters as the default key handling is not
// safe w.r.t. concurrency
fn test_crypto_parameters_handling() -> Result<(), Error> {
use serde_json::json;
use proxmox::tools::fs::{replace_file, CreateOptions};
let some_key = vec![1;1];
let default_key = vec![2;1];
let some_master_key = vec![3;1];
let default_master_key = vec![4;1];
let keypath = "./target/testout/keyfile.test";
let master_keypath = "./target/testout/masterkeyfile.test";
let invalid_keypath = "./target/testout/invalid_keyfile.test";
let no_key_res = CryptoParams {
enc_key: None,
master_pubkey: None,
mode: CryptMode::None,
};
let some_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let some_key_some_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_path(
master_keypath.to_string(),
some_master_key.clone(),
)),
mode: CryptMode::Encrypt,
};
let some_key_default_master_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: Some(KeyWithSource::from_default(default_master_key.clone())),
mode: CryptMode::Encrypt,
};
let some_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_path(
keypath.to_string(),
some_key.clone(),
)),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
let default_key_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::Encrypt,
};
let default_key_sign_res = CryptoParams {
enc_key: Some(KeyWithSource::from_default(default_key.clone())),
master_pubkey: None,
mode: CryptMode::SignOnly,
};
replace_file(&keypath, &some_key, CreateOptions::default())?;
replace_file(&master_keypath, &some_master_key, CreateOptions::default())?;
// no params, no default key == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, no default key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now set a default key
unsafe { set_test_encryption_key(Ok(Some(default_key.clone()))); }
// and repeat
// no params but default key == default key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), default_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key == default key with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only"}));
assert_eq!(res.unwrap(), default_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt"}));
assert_eq!(res.unwrap(), default_key_res);
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now make default key retrieval error
unsafe { set_test_encryption_key(Err(format_err!("test error"))); }
// and repeat
// no params, default key retrieval errors == Error
assert!(crypto_parameters(&json!({})).is_err());
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// crypt mode none == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt/sign-only, no keyfile, default key error == Error
assert!(crypto_parameters(&json!({"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode sign-only/encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "sign-only", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_sign_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_res);
// invalid keyfile parameter always errors
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": invalid_keypath, "crypt-mode": "encrypt"})).is_err());
// now remove default key again
unsafe { set_test_encryption_key(Ok(None)); }
// set a default master key
unsafe { set_test_default_master_pubkey(Ok(Some(default_master_key.clone()))); }
// and use an explicit master key
assert!(crypto_parameters(&json!({"master-pubkey-file": master_keypath})).is_err());
// just a default == no key
let res = crypto_parameters(&json!({}));
assert_eq!(res.unwrap(), no_key_res);
// keyfile param == key from keyfile
let res = crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
// same with fallback to default master key
let res = crypto_parameters(&json!({"keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// crypt mode none == error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "master-pubkey-file": master_keypath})).is_err());
// with just default master key == no key
let res = crypto_parameters(&json!({"crypt-mode": "none"}));
assert_eq!(res.unwrap(), no_key_res);
// crypt mode encrypt without enc key == error
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt", "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "encrypt"})).is_err());
// crypt mode none with explicit key == Error
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath, "master-pubkey-file": master_keypath})).is_err());
assert!(crypto_parameters(&json!({"crypt-mode": "none", "keyfile": keypath})).is_err());
// crypt mode encrypt with keyfile == key from keyfile with correct mode
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath, "master-pubkey-file": master_keypath}));
assert_eq!(res.unwrap(), some_key_some_master_res);
let res = crypto_parameters(&json!({"crypt-mode": "encrypt", "keyfile": keypath}));
assert_eq!(res.unwrap(), some_key_default_master_res);
// invalid master keyfile parameter always errors when a key is passed, even with a valid
// default master key
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "none"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "sign-only"})).is_err());
assert!(crypto_parameters(&json!({"keyfile": keypath, "master-pubkey-file": invalid_keypath,"crypt-mode": "encrypt"})).is_err());
Ok(())
}


@ -1,389 +0,0 @@
//! Shared tools useful for common CLI clients.
use std::collections::HashMap;
use anyhow::{bail, format_err, Context, Error};
use serde_json::{json, Value};
use xdg::BaseDirectories;
use proxmox::{
api::schema::*,
tools::fs::file_get_json,
};
use pbs_api_types::{BACKUP_REPO_URL, Authid};
use pbs_buildcfg;
use pbs_datastore::BackupDir;
use pbs_tools::json::json_object_to_query;
use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::client::{BackupRepository, HttpClient, HttpClientOptions};
use proxmox_backup::tools;
pub mod key_source;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL)
.max_length(256)
.schema();
pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
.minimum(64)
.maximum(4096)
.default(4096)
.schema();
pub fn get_default_repository() -> Option<String> {
std::env::var("PBS_REPOSITORY").ok()
}
pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
let repo_url = param["repository"]
.as_str()
.map(String::from)
.or_else(get_default_repository)
.ok_or_else(|| format_err!("unable to get (default) repository"))?;
let repo: BackupRepository = repo_url.parse()?;
Ok(repo)
}
pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> {
param
.get("repository")
.map(String::from)
.or_else(get_default_repository)
.and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
}
pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
connect_do(repo.host(), repo.port(), repo.auth_id())
.map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
}
fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
use std::env::VarError::*;
let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
Ok(p) => Some(p),
Err(NotUnicode(_)) => bail!(format!("{} contains bad characters", ENV_VAR_PBS_PASSWORD)),
Err(NotPresent) => None,
};
let options = HttpClientOptions::new_interactive(password, fingerprint);
HttpClient::new(server, port, auth_id, options)
}
/// like get, but simply ignore errors and return Null instead
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
// ticket cache, but no questions asked
let options = HttpClientOptions::new_interactive(password, fingerprint)
.interactive(false);
let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
Ok(v) => v,
_ => return Value::Null,
};
let mut resp = match client.get(url, None).await {
Ok(v) => v,
_ => return Value::Null,
};
if let Some(map) = resp.as_object_mut() {
if let Some(data) = map.remove("data") {
return data;
}
}
Value::Null
}
pub fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_backup_group_do(param).await })
}
pub async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let (Some(backup_id), Some(backup_type)) =
(item["backup-id"].as_str(), item["backup-type"].as_str())
{
result.push(format!("{}/{}", backup_type, backup_id));
}
}
}
result
}
pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
pub async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
if arg.matches('/').count() < 2 {
let groups = complete_backup_group_do(param).await;
let mut result = vec![];
for group in groups {
result.push(group.to_string());
result.push(format!("{}/", group));
}
return result;
}
complete_backup_snapshot_do(param).await
}
pub fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_backup_snapshot_do(param).await })
}
pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
}
}
}
result
}
pub fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_server_file_name_do(param).await })
}
pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let snapshot: BackupDir = match param.get("snapshot") {
Some(path) => {
match path.parse() {
Ok(v) => v,
_ => return result,
}
}
_ => return result,
};
let query = json_object_to_query(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
})).unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
if let Some(filename) = item["filename"].as_str() {
result.push(filename.to_owned());
}
}
}
result
}
pub fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.map(|v| pbs_tools::format::strip_server_file_extension(&v))
.collect()
}
pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.filter_map(|name| {
if name.ends_with(".pxar.didx") {
Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
})
.collect()
}
pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
.filter_map(|name| {
if name.ends_with(".img.fidx") {
Some(pbs_tools::format::strip_server_file_extension(name))
} else {
None
}
})
.collect()
}
pub fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let mut size = 64;
loop {
result.push(size.to_string());
size *= 2;
if size > 4096 { break; }
}
result
}
pub fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pbs_runtime::main(async { complete_auth_id_do(param).await })
}
pub async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
for user in parsed {
result.push(user.userid.to_string());
for token in user.tokens {
result.push(token.tokenid.to_string());
}
}
};
result
}
pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let base = match BaseDirectories::with_prefix("proxmox-backup") {
Ok(v) => v,
_ => return result,
};
// usually $HOME/.cache/proxmox-backup/repo-list
let path = match base.place_cache_file("repo-list") {
Ok(v) => v,
_ => return result,
};
let data = file_get_json(&path, None).unwrap_or_else(|_| json!({}));
if let Some(map) = data.as_object() {
for (repo, _count) in map {
result.push(repo.to_owned());
}
}
result
}
pub fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let data: Vec<&str> = arg.splitn(2, ':').collect();
if data.len() != 2 {
result.push(String::from("root.pxar:/"));
result.push(String::from("etc.pxar:/etc"));
return result;
}
let files = tools::complete_file_name(data[1], param);
for file in files {
result.push(format!("{}:{}", data[0], file));
}
result
}
pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}
/// Convenience helper for better error messages:
pub fn find_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
let file_name = file_name.as_ref();
base_directories()
.map(|base| base.find_config_file(file_name))
.with_context(|| format!("error searching for {}", description))
}
pub fn place_xdg_file(
file_name: impl AsRef<std::path::Path>,
description: &'static str,
) -> Result<std::path::PathBuf, Error> {
let file_name = file_name.as_ref();
base_directories()
.and_then(|base| base.place_config_file(file_name).map_err(Error::from))
.with_context(|| format!("failed to place {} in xdg home", description))
}
/// Returns a runtime dir owned by the current user.
/// Note that XDG_RUNTIME_DIR is not always available, especially for non-login users like
/// "www-data", so we use a custom one in /run/proxmox-backup/<uid> instead.
pub fn get_user_run_dir() -> Result<std::path::PathBuf, Error> {
let uid = nix::unistd::Uid::current();
let mut path: std::path::PathBuf = pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR.into();
path.push(uid.to_string());
tools::create_run_dir()?;
std::fs::create_dir_all(&path)?;
Ok(path)
}


@ -1,19 +1,20 @@
//! Abstraction layer over different methods of accessing a block backup
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::pin::Pin;
use proxmox_backup::backup::{BackupDir, BackupManifest};
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::client::BackupRepository;
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use proxmox::api::{api, cli::*};
use pbs_client::BackupRepository;
use proxmox_backup::backup::{BackupDir, BackupManifest};
use proxmox_backup::api2::types::ArchiveEntry;
use super::block_driver_qemu::QemuBlockDriver;
/// Contains details about a snapshot that is to be accessed by block file restore


@ -1,21 +1,23 @@
//! Block file access via a small QEMU restore VM using the PBS block driver in QEMU
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{prelude::*, SeekFrom};
use anyhow::{bail, Error};
use futures::FutureExt;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{prelude::*, SeekFrom};
use proxmox::tools::fs::lock_file;
use pbs_client::{DEFAULT_VSOCK_PORT, BackupRepository, VsockClient};
use proxmox_backup::api2::types::ArchiveEntry;
use proxmox_backup::backup::BackupDir;
use proxmox_backup::client::*;
use proxmox_backup::tools;
use super::block_driver::*;
use crate::proxmox_client_tools::get_user_run_dir;
use crate::get_user_run_dir;
const RESTORE_VM_MAP: &str = "restore-vm-map.json";


@ -13,8 +13,9 @@ use nix::unistd::Pid;
use proxmox::tools::fs::{create_path, file_read_string, make_tmp_file, CreateOptions};
use pbs_client::{VsockClient, DEFAULT_VSOCK_PORT};
use proxmox_backup::backup::backup_user;
use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
use proxmox_backup::tools;
use super::SnapRestoreDetails;


@ -19,12 +19,13 @@ use proxmox::api::{
};
use proxmox::{identity, list_subdirs_api_method, sortable};
use pbs_client::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
use pbs_tools::fs::read_subdir;
use pbs_tools::zip::zip_directory;
use proxmox_backup::api2::types::*;
use proxmox_backup::backup::DirEntryAttribute;
use proxmox_backup::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES};
use proxmox_backup::tools::{self, zip::zip_directory};
use proxmox_backup::tools;
use pxar::encoder::aio::TokioWriter;


@ -3,12 +3,10 @@ use serde_json::Value;
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
use pbs_client::{connect_to_localhost, view_task_result};
use proxmox_backup::{
config,
client::{
connect_to_localhost,
view_task_result,
},
api2::{
self,
types::*,


@ -12,13 +12,11 @@ use futures::select;
use tokio::signal::unix::{signal, SignalKind};
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use pbs_client::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags, PxarExtractOptions};
use proxmox::api::cli::*;
use proxmox::api::api;
use proxmox_backup::tools;
use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags, PxarExtractOptions};
fn extract_archive_from_reader<R: std::io::Read>(
reader: &mut R,
target: &str,
@ -26,8 +24,7 @@ fn extract_archive_from_reader<R: std::io::Read>(
verbose: bool,
options: PxarExtractOptions,
) -> Result<(), Error> {
proxmox_backup::pxar::extract_archive(
pbs_client::pxar::extract_archive(
pxar::decoder::Decoder::from_std(reader)?,
Path::new(target),
feature_flags,
@ -327,7 +324,7 @@ async fn create_archive(
Some(HashSet::new())
};
let options = proxmox_backup::pxar::PxarCreateOptions {
let options = pbs_client::pxar::PxarCreateOptions {
entries_max: entries_max as usize,
device_set,
patterns,
@ -372,7 +369,7 @@ async fn create_archive(
}
let writer = pxar::encoder::sync::StandardWriter::new(writer);
proxmox_backup::pxar::create_archive(
pbs_client::pxar::create_archive(
dir,
writer,
feature_flags,
@ -464,29 +461,29 @@ fn main() {
"create",
CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
.arg_param(&["archive", "source"])
.completion_cb("archive", tools::complete_file_name)
.completion_cb("source", tools::complete_file_name),
.completion_cb("archive", pbs_tools::fs::complete_file_name)
.completion_cb("source", pbs_tools::fs::complete_file_name),
)
.insert(
"extract",
CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
.arg_param(&["archive", "target"])
.completion_cb("archive", tools::complete_file_name)
.completion_cb("target", tools::complete_file_name)
.completion_cb("files-from", tools::complete_file_name),
.completion_cb("archive", pbs_tools::fs::complete_file_name)
.completion_cb("target", pbs_tools::fs::complete_file_name)
.completion_cb("files-from", pbs_tools::fs::complete_file_name),
)
.insert(
"mount",
CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
.arg_param(&["archive", "mountpoint"])
.completion_cb("archive", tools::complete_file_name)
.completion_cb("mountpoint", tools::complete_file_name),
.completion_cb("archive", pbs_tools::fs::complete_file_name)
.completion_cb("mountpoint", pbs_tools::fs::complete_file_name),
)
.insert(
"list",
CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
.arg_param(&["archive"])
.completion_cb("archive", tools::complete_file_name),
.completion_cb("archive", pbs_tools::fs::complete_file_name),
);
let rpcenv = CliEnvironment::new();

View File

@ -1,229 +0,0 @@
use anyhow::{format_err, Error};
use std::io::{Write, Seek, SeekFrom};
use std::fs::File;
use std::sync::Arc;
use std::os::unix::fs::OpenOptionsExt;
use futures::future::AbortHandle;
use serde_json::{json, Value};
use proxmox::tools::digest_to_hex;
use pbs_datastore::{CryptConfig, BackupManifest};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
use pbs_tools::sha::sha256;
use super::{HttpClient, H2Client};
/// Backup Reader
pub struct BackupReader {
h2: H2Client,
abort: AbortHandle,
crypt_config: Option<Arc<CryptConfig>>,
}
impl Drop for BackupReader {
fn drop(&mut self) {
self.abort.abort();
}
}
impl BackupReader {
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
Arc::new(Self { h2, abort, crypt_config})
}
/// Create a new instance by upgrading the connection at '/api2/json/reader'
pub async fn start(
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
backup_type: &str,
backup_id: &str,
backup_time: i64,
debug: bool,
) -> Result<Arc<BackupReader>, Error> {
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"backup-time": backup_time,
"store": datastore,
"debug": debug,
});
let req = HttpClient::request_builder(client.server(), client.port(), "GET", "/api2/json/reader", Some(param)).unwrap();
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!())).await?;
Ok(BackupReader::new(h2, abort, crypt_config))
}
/// Execute a GET request
pub async fn get(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.get(path, param).await
}
/// Execute a PUT request
pub async fn put(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.put(path, param).await
}
/// Execute a POST request
pub async fn post(
&self,
path: &str,
param: Option<Value>,
) -> Result<Value, Error> {
self.h2.post(path, param).await
}
/// Execute a GET request and send output to a writer
pub async fn download<W: Write + Send>(
&self,
file_name: &str,
output: W,
) -> Result<(), Error> {
let path = "download";
let param = json!({ "file-name": file_name });
self.h2.download(path, Some(param), output).await
}
/// Execute a special GET request and send output to a writer
///
/// This writes random data, and is only useful to test download speed.
pub async fn speedtest<W: Write + Send>(
&self,
output: W,
) -> Result<(), Error> {
self.h2.download("speedtest", None, output).await
}
/// Download a specific chunk
pub async fn download_chunk<W: Write + Send>(
&self,
digest: &[u8; 32],
output: W,
) -> Result<(), Error> {
let path = "chunk";
let param = json!({ "digest": digest_to_hex(digest) });
self.h2.download(path, Some(param), output).await
}
pub fn force_close(self) {
self.abort.abort();
}
/// Download backup manifest (index.json)
///
/// The manifest signature is verified if we have a crypt_config.
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(None, None)?;
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok((manifest, data))
}
/// Download a .blob file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
/// the provided manifest.
pub async fn download_blob(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<DataBlobReader<'_, File>, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
tmpfile.seek(SeekFrom::Start(0))?;
let (csum, size) = sha256(&mut tmpfile)?;
manifest.verify_file(name, &csum, size)?;
tmpfile.seek(SeekFrom::Start(0))?;
DataBlobReader::new(tmpfile, self.crypt_config.clone())
}
/// Download dynamic index file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_dynamic_index(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
// Note: do not use values stored in the index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(name, &csum, size)?;
Ok(index)
}
/// Download fixed index file
///
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_fixed_index(
&self,
manifest: &BackupManifest,
name: &str,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name, &mut tmpfile).await?;
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
// Note: do not use values stored in the index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(name, &csum, size)?;
Ok(index)
}
}
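// A sketch of the typical reader flow, under stated assumptions: `client` is an
// already authenticated HttpClient, and the "store1"/"host"/"myhost" snapshot
// coordinates are illustrative placeholders supplied by the caller.
#[allow(dead_code)]
async fn backup_reader_sketch(client: HttpClient, backup_time: i64) -> Result<(), Error> {
    let reader = BackupReader::start(
        client,
        None, // no crypt config: the manifest signature check is skipped
        "store1",
        "host",
        "myhost",
        backup_time,
        false, // debug
    )
    .await?;
    let (manifest, _raw) = reader.download_manifest().await?;
    // the manifest lists every archive contained in the snapshot
    println!("snapshot contains {} files", manifest.files().len());
    Ok(())
}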

View File

@ -1,101 +0,0 @@
use std::convert::TryFrom;
use std::fmt;
use anyhow::{format_err, Error};
use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
/// Reference remote backup locations
///
#[derive(Debug)]
pub struct BackupRepository {
/// The user name used for Authentication
auth_id: Option<Authid>,
/// The host name or IP address
host: Option<String>,
/// The port
port: Option<u16>,
/// The name of the datastore
store: String,
}
impl BackupRepository {
pub fn new(auth_id: Option<Authid>, host: Option<String>, port: Option<u16>, store: String) -> Self {
let host = match host {
Some(host) if (IP_V6_REGEX.regex_obj)().is_match(&host) => {
Some(format!("[{}]", host))
},
other => other,
};
Self { auth_id, host, port, store }
}
pub fn auth_id(&self) -> &Authid {
if let Some(ref auth_id) = self.auth_id {
return auth_id;
}
&Authid::root_auth_id()
}
pub fn user(&self) -> &Userid {
if let Some(auth_id) = &self.auth_id {
return auth_id.user();
}
Userid::root_userid()
}
pub fn host(&self) -> &str {
if let Some(ref host) = self.host {
return host;
}
"localhost"
}
pub fn port(&self) -> u16 {
if let Some(port) = self.port {
return port;
}
8007
}
pub fn store(&self) -> &str {
&self.store
}
}
impl fmt::Display for BackupRepository {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match (&self.auth_id, &self.host, self.port) {
(Some(auth_id), _, _) => write!(f, "{}@{}:{}:{}", auth_id, self.host(), self.port(), self.store),
(None, Some(host), None) => write!(f, "{}:{}", host, self.store),
(None, _, Some(port)) => write!(f, "{}:{}:{}", self.host(), port, self.store),
(None, None, None) => write!(f, "{}", self.store),
}
}
}
impl std::str::FromStr for BackupRepository {
type Err = Error;
/// Parse a repository URL.
///
/// This parses strings like `user@host:datastore`. The `user` and
/// `host` parts are optional, where `host` defaults to the local
/// host, and `user` defaults to `root@pam`.
fn from_str(url: &str) -> Result<Self, Self::Err> {
let cap = (BACKUP_REPO_URL_REGEX.regex_obj)().captures(url)
.ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;
Ok(Self {
auth_id: cap.get(1).map(|m| Authid::try_from(m.as_str().to_owned())).transpose()?,
host: cap.get(2).map(|m| m.as_str().to_owned()),
port: cap.get(3).map(|m| m.as_str().parse::<u16>()).transpose()?,
store: cap[4].to_owned(),
})
}
}
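// A small round-trip sketch for the parser and Display impl above; the
// concrete user, host and store values are illustrative.
#[cfg(test)]
mod backup_repo_tests {
    use super::BackupRepository;

    #[test]
    fn parse_and_display() {
        let repo: BackupRepository = "admin@pbs@backup.example.com:store1"
            .parse()
            .expect("valid repository spec");
        assert_eq!(repo.store(), "store1");
        assert_eq!(repo.port(), 8007); // default when no port is given
        assert_eq!(repo.to_string(), "admin@pbs@backup.example.com:8007:store1");
    }
}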

View File

@ -1,39 +0,0 @@
use anyhow::{bail, Error};
use proxmox::api::schema::*;
proxmox::const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<label>:<path>]).")
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }
pub struct BackupSpecification {
pub archive_name: String, // left part
pub config_string: String, // right part
pub spec_type: BackupSpecificationType,
}
pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {
if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
let archive_name = caps.get(1).unwrap().as_str().into();
let extension = caps.get(2).unwrap().as_str();
let config_string = caps.get(3).unwrap().as_str().into();
let spec_type = match extension {
"pxar" => BackupSpecificationType::PXAR,
"img" => BackupSpecificationType::IMAGE,
"conf" => BackupSpecificationType::CONFIG,
"log" => BackupSpecificationType::LOGFILE,
_ => bail!("unknown backup source type '{}'", extension),
};
return Ok(BackupSpecification { archive_name, config_string, spec_type });
}
bail!("unable to parse backup source specification '{}'", value);
}
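// A sketch of how a CLI argument splits into its parts; "root.pxar:/" is the
// conventional spec for a full filesystem backup.
#[cfg(test)]
mod backup_spec_tests {
    use super::*;

    #[test]
    fn parse_pxar_spec() {
        let spec = parse_backup_specification("root.pxar:/").expect("valid spec");
        assert_eq!(spec.archive_name, "root.pxar");
        assert_eq!(spec.config_string, "/");
        assert!(matches!(spec.spec_type, BackupSpecificationType::PXAR));
    }
}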

View File

@ -1,842 +0,0 @@
use std::collections::HashSet;
use std::future::Future;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
use futures::stream::{Stream, StreamExt, TryStreamExt};
use serde_json::{json, Value};
use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
use proxmox::tools::digest_to_hex;
use pbs_datastore::{CATALOG_NAME, CryptConfig};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_tools::format::HumanByte;
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use super::{H2Client, HttpClient};
pub struct BackupWriter {
h2: H2Client,
abort: AbortHandle,
verbose: bool,
crypt_config: Option<Arc<CryptConfig>>,
}
impl Drop for BackupWriter {
fn drop(&mut self) {
self.abort.abort();
}
}
pub struct BackupStats {
pub size: u64,
pub csum: [u8; 32],
}
/// Options for uploading blobs/streams to the server
#[derive(Default, Clone)]
pub struct UploadOptions {
pub previous_manifest: Option<Arc<BackupManifest>>,
pub compress: bool,
pub encrypt: bool,
pub fixed_size: Option<u64>,
}
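// A construction sketch: thanks to the `Default` derive, callers set only the
// fields they need. The 4 GiB fixed size is an illustrative value as used for
// disk-image uploads.
#[cfg(test)]
mod upload_options_tests {
    use super::UploadOptions;

    #[test]
    fn default_based_construction() {
        let options = UploadOptions {
            compress: true,
            fixed_size: Some(4 * 1024 * 1024 * 1024),
            ..UploadOptions::default()
        };
        assert!(!options.encrypt);
        assert!(options.previous_manifest.is_none());
    }
}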
struct UploadStats {
chunk_count: usize,
chunk_reused: usize,
size: usize,
size_reused: usize,
size_compressed: usize,
duration: std::time::Duration,
csum: [u8; 32],
}
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
impl BackupWriter {
fn new(
h2: H2Client,
abort: AbortHandle,
crypt_config: Option<Arc<CryptConfig>>,
verbose: bool,
) -> Arc<Self> {
Arc::new(Self {
h2,
abort,
crypt_config,
verbose,
})
}
// FIXME: extract into (flattened) parameter struct?
#[allow(clippy::too_many_arguments)]
pub async fn start(
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
backup_type: &str,
backup_id: &str,
backup_time: i64,
debug: bool,
benchmark: bool,
) -> Result<Arc<BackupWriter>, Error> {
let param = json!({
"backup-type": backup_type,
"backup-id": backup_id,
"backup-time": backup_time,
"store": datastore,
"debug": debug,
"benchmark": benchmark
});
let req = HttpClient::request_builder(
client.server(),
client.port(),
"GET",
"/api2/json/backup",
Some(param),
)
.unwrap();
let (h2, abort) = client
.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
.await?;
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
}
pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.get(path, param).await
}
pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.put(path, param).await
}
pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
self.h2.post(path, param).await
}
pub async fn upload_post(
&self,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
self.h2
.upload("POST", path, param, content_type, data)
.await
}
pub async fn send_upload_request(
&self,
method: &str,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<h2::client::ResponseFuture, Error> {
let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap();
let response_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
Ok(response_future)
}
pub async fn upload_put(
&self,
path: &str,
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<Value, Error> {
self.h2.upload("PUT", path, param, content_type, data).await
}
pub async fn finish(self: Arc<Self>) -> Result<(), Error> {
let h2 = self.h2.clone();
h2.post("finish", None)
.map_ok(move |_| {
self.abort.abort();
})
.await
}
pub fn cancel(&self) {
self.abort.abort();
}
pub async fn upload_blob<R: std::io::Read>(
&self,
mut reader: R,
file_name: &str,
) -> Result<BackupStats, Error> {
let mut raw_data = Vec::new();
// fixme: avoid loading into memory
reader.read_to_end(&mut raw_data)?;
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
let size = raw_data.len() as u64;
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_data(
&self,
data: Vec<u8>,
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let blob = match (options.encrypt, &self.crypt_config) {
(false, _) => DataBlob::encode(&data, None, options.compress)?,
(true, None) => bail!("requested encryption without a crypt config"),
(true, Some(crypt_config)) => {
DataBlob::encode(&data, Some(crypt_config), options.compress)?
}
};
let raw_data = blob.into_inner();
let size = raw_data.len() as u64;
let csum = openssl::sha::sha256(&raw_data);
let param = json!({"encoded-size": size, "file-name": file_name });
let _value = self
.h2
.upload(
"POST",
"blob",
Some(param),
"application/octet-stream",
raw_data,
)
.await?;
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
&self,
src_path: P,
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let src_path = src_path.as_ref();
let mut file = tokio::fs::File::open(src_path)
.await
.map_err(|err| format_err!("unable to open file {:?} - {}", src_path, err))?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)
.await
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
self.upload_blob_from_data(contents, file_name, options)
.await
}
pub async fn upload_stream(
&self,
archive_name: &str,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let known_chunks = Arc::new(Mutex::new(HashSet::new()));
let mut param = json!({ "archive-name": archive_name });
let prefix = if let Some(size) = options.fixed_size {
param["size"] = size.into();
"fixed"
} else {
"dynamic"
};
if options.encrypt && self.crypt_config.is_none() {
bail!("requested encryption without a crypt config");
}
let index_path = format!("{}_index", prefix);
let close_path = format!("{}_close", prefix);
if let Some(manifest) = options.previous_manifest {
// try, but ignore errors
match ArchiveType::from_path(archive_name) {
Ok(ArchiveType::FixedIndex) => {
let _ = self
.download_previous_fixed_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
Ok(ArchiveType::DynamicIndex) => {
let _ = self
.download_previous_dynamic_index(
archive_name,
&manifest,
known_chunks.clone(),
)
.await;
}
_ => { /* do nothing */ }
}
}
let wid = self
.h2
.post(&index_path, Some(param))
.await?
.as_u64()
.unwrap();
let upload_stats = Self::upload_chunk_info_stream(
self.h2.clone(),
wid,
stream,
&prefix,
known_chunks.clone(),
if options.encrypt {
self.crypt_config.clone()
} else {
None
},
options.compress,
self.verbose,
)
.await?;
let size_dirty = upload_stats.size - upload_stats.size_reused;
let size: HumanByte = upload_stats.size.into();
let archive = if self.verbose {
archive_name.to_string()
} else {
pbs_tools::format::strip_server_file_extension(archive_name)
};
if archive_name != CATALOG_NAME {
let speed: HumanByte =
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
let size_dirty: HumanByte = size_dirty.into();
let size_compressed: HumanByte = upload_stats.size_compressed.into();
println!(
"{}: had to backup {} of {} (compressed {}) in {:.2}s",
archive,
size_dirty,
size,
size_compressed,
upload_stats.duration.as_secs_f64()
);
println!("{}: average backup speed: {}/s", archive, speed);
} else {
println!("Uploaded backup catalog ({})", size);
}
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
let reused: HumanByte = upload_stats.size_reused.into();
println!(
"{}: backup was done incrementally, reused {} ({:.1}%)",
archive, reused, reused_percent
);
}
if self.verbose && upload_stats.chunk_count > 0 {
println!(
"{}: Reused {} from {} chunks.",
archive, upload_stats.chunk_reused, upload_stats.chunk_count
);
println!(
"{}: Average chunk size was {}.",
archive,
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
);
println!(
"{}: Average time per request: {} microseconds.",
archive,
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
);
}
let param = json!({
"wid": wid ,
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
"csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(BackupStats {
size: upload_stats.size as u64,
csum: upload_stats.csum,
})
}
fn response_queue(
verbose: bool,
) -> (
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>,
) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
// FIXME: check if this works as expected as replacement for the combinator below?
// tokio::spawn(async move {
// let result: Result<(), Error> = (async move {
// while let Some(response) = verify_queue_rx.recv().await {
// match H2Client::h2api_response(response.await?).await {
// Ok(result) => println!("RESPONSE: {:?}", result),
// Err(err) => bail!("pipelined request failed: {}", err),
// }
// }
// Ok(())
// }).await;
// let _ignore_closed_channel = verify_result_tx.send(result);
// });
// old code for reference?
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.try_for_each(move |response: h2::client::ResponseFuture| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(move |result| {
if verbose {
println!("RESPONSE: {:?}", result)
}
})
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
let _ignore_closed_channel = verify_result_tx.send(result);
}),
);
(verify_queue_tx, verify_result_rx)
}
fn append_chunk_queue(
h2: H2Client,
wid: u64,
path: String,
verbose: bool,
) -> (UploadQueueSender, UploadResultReceiver) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
// FIXME: async-block-ify this code!
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
match (response, merged_chunk_info) {
(Some(response), MergedChunkInfo::Known(list)) => {
Either::Left(
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.and_then(move |_result| {
future::ok(MergedChunkInfo::Known(list))
})
)
}
(None, MergedChunkInfo::Known(list)) => {
Either::Right(future::ok(MergedChunkInfo::Known(list)))
}
_ => unreachable!(),
}
})
.merge_known_chunks()
.and_then(move |merged_chunk_info| {
match merged_chunk_info {
MergedChunkInfo::Known(chunk_list) => {
let mut digest_list = vec![];
let mut offset_list = vec![];
for (offset, digest) in chunk_list {
digest_list.push(digest_to_hex(&digest));
offset_list.push(offset);
}
if verbose { println!("append chunks list len ({})", digest_list.len()); }
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
let upload_data = Some(param_data);
h2.send_request(request, upload_data)
.and_then(move |response| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(|_| ())
})
.map_err(|err| format_err!("pipelined request failed: {}", err))
}
_ => unreachable!(),
}
})
.try_for_each(|_| future::ok(()))
.map(|result| {
let _ignore_closed_channel = verify_result_tx.send(result);
})
);
(verify_queue_tx, verify_result_rx)
}
pub async fn download_previous_fixed_index(
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = FixedIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read fixed index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in the index (not trusted) - instead, compute them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
// add index chunks to known chunks
let mut known_chunks = known_chunks.lock().unwrap();
for i in 0..index.index_count() {
known_chunks.insert(*index.index_digest(i).unwrap());
}
if self.verbose {
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
}
pub async fn download_previous_dynamic_index(
&self,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
.download("previous", Some(param), &mut tmpfile)
.await?;
let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
})?;
// Note: do not use values stored in index (not trusted) - instead, computed them again
let (csum, size) = index.compute_csum();
manifest.verify_file(archive_name, &csum, size)?;
// add index chunks to known chunks
let mut known_chunks = known_chunks.lock().unwrap();
for i in 0..index.index_count() {
known_chunks.insert(*index.index_digest(i).unwrap());
}
if self.verbose {
println!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
);
}
Ok(index)
}
/// Retrieve backup time of last backup
pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
let data = self.h2.get("previous_backup_time", None).await?;
serde_json::from_value(data).map_err(|err| {
format_err!(
"Failed to parse backup time value returned by server - {}",
err
)
})
}
/// Download backup manifest (index.json) of last backup
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
self.h2
.download("previous", Some(param), &mut raw_data)
.await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
let manifest =
BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
Ok(manifest)
}
// We have no `self` here for `h2` and `verbose`; the only other argument shared
// with another function in this module is `wid`. Those three could be grouped
// into a struct, but there is no real benefit since this is a private method.
#[allow(clippy::too_many_arguments)]
fn upload_chunk_info_stream(
h2: H2Client,
wid: u64,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
prefix: &str,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
crypt_config: Option<Arc<CryptConfig>>,
compress: bool,
verbose: bool,
) -> impl Future<Output = Result<UploadStats, Error>> {
let total_chunks = Arc::new(AtomicUsize::new(0));
let total_chunks2 = total_chunks.clone();
let known_chunk_count = Arc::new(AtomicUsize::new(0));
let known_chunk_count2 = known_chunk_count.clone();
let stream_len = Arc::new(AtomicUsize::new(0));
let stream_len2 = stream_len.clone();
let compressed_stream_len = Arc::new(AtomicU64::new(0));
let compressed_stream_len2 = compressed_stream_len.clone();
let reused_len = Arc::new(AtomicUsize::new(0));
let reused_len2 = reused_len.clone();
let append_chunk_path = format!("{}_index", prefix);
let upload_chunk_path = format!("{}_chunk", prefix);
let is_fixed_chunk_size = prefix == "fixed";
let (upload_queue, upload_result) =
Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, verbose);
let start_time = std::time::Instant::now();
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
let index_csum_2 = index_csum.clone();
stream
.and_then(move |data| {
let chunk_len = data.len();
total_chunks.fetch_add(1, Ordering::SeqCst);
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
if let Some(ref crypt_config) = crypt_config {
chunk_builder = chunk_builder.crypt_config(crypt_config);
}
let mut known_chunks = known_chunks.lock().unwrap();
let digest = chunk_builder.digest();
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
let chunk_end = offset + chunk_len as u64;
if !is_fixed_chunk_size {
csum.update(&chunk_end.to_le_bytes());
}
csum.update(digest);
let chunk_is_known = known_chunks.contains(digest);
if chunk_is_known {
known_chunk_count.fetch_add(1, Ordering::SeqCst);
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
} else {
let compressed_stream_len2 = compressed_stream_len.clone();
known_chunks.insert(*digest);
future::ready(chunk_builder.build().map(move |(chunk, digest)| {
compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
MergedChunkInfo::New(ChunkInfo {
chunk,
digest,
chunk_len: chunk_len as u64,
offset,
})
}))
}
})
.merge_known_chunks()
.try_for_each(move |merged_chunk_info| {
let upload_queue = upload_queue.clone();
if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
let offset = chunk_info.offset;
let digest = chunk_info.digest;
let digest_str = digest_to_hex(&digest);
/* too verbose, needs finer verbosity setting granularity
if verbose {
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
chunk_info.chunk_len, offset);
}
*/
let chunk_data = chunk_info.chunk.into_inner();
let param = json!({
"wid": wid,
"digest": digest_str,
"size": chunk_info.chunk_len,
"encoded-size": chunk_data.len(),
});
let ct = "application/octet-stream";
let request = H2Client::request_builder(
"localhost",
"POST",
&upload_chunk_path,
Some(param),
Some(ct),
)
.unwrap();
let upload_data = Some(bytes::Bytes::from(chunk_data));
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
Either::Left(h2.send_request(request, upload_data).and_then(
move |response| async move {
upload_queue
.send((new_info, Some(response)))
.await
.map_err(|err| {
format_err!("failed to send to upload queue: {}", err)
})
},
))
} else {
Either::Right(async move {
upload_queue
.send((merged_chunk_info, None))
.await
.map_err(|err| format_err!("failed to send to upload queue: {}", err))
})
}
})
.then(move |result| async move { upload_result.await?.and(result) }.boxed())
.and_then(move |_| {
let duration = start_time.elapsed();
let chunk_count = total_chunks2.load(Ordering::SeqCst);
let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
let size = stream_len2.load(Ordering::SeqCst);
let size_reused = reused_len2.load(Ordering::SeqCst);
let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
let mut guard = index_csum_2.lock().unwrap();
let csum = guard.take().unwrap().finish();
futures::future::ok(UploadStats {
chunk_count,
chunk_reused,
size,
size_reused,
size_compressed,
duration,
csum,
})
})
}
/// Upload speed test - prints result to stderr
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
let mut data = vec![];
// generate a pseudo-random byte sequence
for i in 0..1024 * 1024 {
for j in 0..4 {
let byte = ((i >> (j << 3)) & 0xff) as u8;
data.push(byte);
}
}
let item_len = data.len();
let mut repeat = 0;
let (upload_queue, upload_result) = Self::response_queue(verbose);
let start_time = std::time::Instant::now();
loop {
repeat += 1;
if start_time.elapsed().as_secs() >= 5 {
break;
}
if verbose {
eprintln!("send test data ({} bytes)", data.len());
}
let request =
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self
.h2
.send_request(request, Some(bytes::Bytes::from(data.clone())))
.await?;
upload_queue.send(request_future).await?;
}
drop(upload_queue); // close queue
let _ = upload_result.await?;
eprintln!(
"Uploaded {} chunks in {} seconds.",
repeat,
start_time.elapsed().as_secs()
);
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
eprintln!(
"Time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
Ok(speed)
}
}
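// A sketch of a minimal writer session, under stated assumptions: `client` is
// an authenticated HttpClient, and the datastore/snapshot names as well as the
// blob name are illustrative placeholders.
#[allow(dead_code)]
async fn backup_writer_sketch(client: HttpClient) -> Result<(), Error> {
    let writer = BackupWriter::start(
        client,
        None, // no encryption
        "store1",
        "host",
        "myhost",
        proxmox::tools::time::epoch_i64(), // new snapshots use the current time
        false, // debug
        false, // benchmark
    )
    .await?;
    let options = UploadOptions { compress: true, ..UploadOptions::default() };
    let stats = writer
        .upload_blob_from_data(b"some config".to_vec(), "demo.conf.blob", options)
        .await?;
    println!("uploaded {} bytes", stats.size);
    writer.finish().await
}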

File diff suppressed because it is too large

View File

@ -1,97 +0,0 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::Error;
use futures::{ready, Stream};
use pin_project::pin_project;
use pbs_datastore::data_blob::ChunkInfo;
pub enum MergedChunkInfo {
Known(Vec<(u64, [u8; 32])>),
New(ChunkInfo),
}
pub trait MergeKnownChunks: Sized {
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self>;
}
#[pin_project]
pub struct MergeKnownChunksQueue<S> {
#[pin]
input: S,
buffer: Option<MergedChunkInfo>,
}
impl<S> MergeKnownChunks for S
where
S: Stream<Item = Result<MergedChunkInfo, Error>>,
{
fn merge_known_chunks(self) -> MergeKnownChunksQueue<Self> {
MergeKnownChunksQueue {
input: self,
buffer: None,
}
}
}
impl<S> Stream for MergeKnownChunksQueue<S>
where
S: Stream<Item = Result<MergedChunkInfo, Error>>,
{
type Item = Result<MergedChunkInfo, Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let mut this = self.project();
loop {
match ready!(this.input.as_mut().poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
None => {
if let Some(last) = this.buffer.take() {
return Poll::Ready(Some(Ok(last)));
} else {
return Poll::Ready(None);
}
}
Some(Ok(merged_chunk_info)) => {
match merged_chunk_info {
MergedChunkInfo::Known(list) => {
let last = this.buffer.take();
match last {
None => {
*this.buffer = Some(MergedChunkInfo::Known(list));
// continue
}
Some(MergedChunkInfo::Known(mut last_list)) => {
last_list.extend_from_slice(&list);
let len = last_list.len();
*this.buffer = Some(MergedChunkInfo::Known(last_list));
if len >= 64 {
return Poll::Ready(this.buffer.take().map(Ok));
}
// continue
}
Some(MergedChunkInfo::New(_)) => {
*this.buffer = Some(MergedChunkInfo::Known(list));
return Poll::Ready(last.map(Ok));
}
}
}
MergedChunkInfo::New(chunk_info) => {
let new = MergedChunkInfo::New(chunk_info);
if let Some(last) = this.buffer.take() {
*this.buffer = Some(new);
return Poll::Ready(Some(Ok(last)));
} else {
return Poll::Ready(Some(Ok(new)));
}
}
}
}
}
}
}
}
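// A usage sketch for the combinator, assuming the `pbs_runtime` helper crate
// is available to drive the future: consecutive `Known` batches are coalesced
// into a single entry (flushed in groups of 64 or at end of stream).
#[cfg(test)]
mod merge_known_chunks_tests {
    use super::*;
    use futures::StreamExt;

    #[test]
    fn merges_consecutive_known_chunks() {
        let input = futures::stream::iter(vec![
            Ok::<_, Error>(MergedChunkInfo::Known(vec![(0, [0u8; 32])])),
            Ok(MergedChunkInfo::Known(vec![(64, [1u8; 32])])),
        ]);
        let output: Vec<_> = pbs_runtime::block_on(input.merge_known_chunks().collect());
        // both input batches end up in one `Known` entry
        assert_eq!(output.len(), 1);
    }
}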

View File

@ -1,66 +0,0 @@
//! Client side interface to the proxmox backup server
//!
//! This library implements the client side to access the backups
//! server using https.
use anyhow::Error;
use pbs_api_types::{Authid, Userid};
use pbs_tools::ticket::Ticket;
use pbs_tools::cert::CertInfo;
use pbs_tools::auth::private_auth_key;
mod merge_known_chunks;
pub mod pipe_to_stream;
mod http_client;
pub use http_client::*;
mod vsock_client;
pub use vsock_client::*;
mod task_log;
pub use task_log::*;
mod backup_reader;
pub use backup_reader::*;
mod backup_writer;
pub use backup_writer::*;
mod remote_chunk_reader;
pub use remote_chunk_reader::*;
mod pxar_backup_stream;
pub use pxar_backup_stream::*;
mod backup_repo;
pub use backup_repo::*;
mod backup_specification;
pub use backup_specification::*;
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
/// Connect to localhost:8007 as root@pam
///
/// This automatically creates a ticket if run as the 'root' user.
pub fn connect_to_localhost() -> Result<HttpClient, Error> {
let uid = nix::unistd::Uid::current();
let client = if uid.is_root() {
let ticket = Ticket::new("PBS", Userid::root_userid())?
.sign(private_auth_key(), None)?;
let fingerprint = CertInfo::new()?.fingerprint()?;
let options = HttpClientOptions::new_non_interactive(ticket, Some(fingerprint));
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
} else {
let options = HttpClientOptions::new_interactive(None, None);
HttpClient::new("localhost", 8007, Authid::root_auth_id(), options)?
};
Ok(client)
}
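// A sketch of the intended call pattern from CLI tools; `api2/json/version` is
// a real endpoint, the printed field access is illustrative.
#[allow(dead_code)]
async fn localhost_version_sketch() -> Result<(), Error> {
    let mut client = connect_to_localhost()?;
    let result = client.get("api2/json/version", None).await?;
    println!("{}", result["data"]);
    Ok(())
}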

View File

@ -1,70 +0,0 @@
// Implement simple flow control for h2 client
//
// See also: hyper/src/proto/h2/mod.rs
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use bytes::Bytes;
use futures::{ready, Future};
use h2::SendStream;
pub struct PipeToSendStream {
body_tx: SendStream<Bytes>,
data: Option<Bytes>,
}
impl PipeToSendStream {
pub fn new(data: Bytes, tx: SendStream<Bytes>) -> PipeToSendStream {
PipeToSendStream {
body_tx: tx,
data: Some(data),
}
}
}
impl Future for PipeToSendStream {
type Output = Result<(), Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.get_mut();
if this.data.is_some() {
// just reserve 1 byte to make sure there's some
// capacity available. h2 will handle the capacity
// management for the actual body chunk.
this.body_tx.reserve_capacity(1);
if this.body_tx.capacity() == 0 {
loop {
match ready!(this.body_tx.poll_capacity(cx)) {
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
Some(Ok(0)) => {}
Some(Ok(_)) => break,
None => return Poll::Ready(Err(format_err!("protocol canceled"))),
}
}
} else if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
return Poll::Ready(Err(match reset {
Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
Err(err) => Error::from(err),
}));
}
this.body_tx
.send_data(this.data.take().unwrap(), true)
.map_err(Error::from)?;
Poll::Ready(Ok(()))
} else {
if let Poll::Ready(reset) = this.body_tx.poll_reset(cx) {
return Poll::Ready(Err(match reset {
Ok(reason) => format_err!("stream received RST_STREAM: {:?}", reason),
Err(err) => Error::from(err),
}));
}
Poll::Ready(Ok(()))
}
}
}

View File

@ -1,125 +0,0 @@
use std::io::Write;
//use std::os::unix::io::FromRawFd;
use std::path::Path;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::stream::Stream;
use futures::future::{Abortable, AbortHandle};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pbs_datastore::catalog::CatalogWriter;
use pbs_tools::sync::StdChannelWriter;
use pbs_tools::tokio::TokioWriterAdapter;
/// Stream implementation to encode and upload .pxar archives.
///
/// The hyper client needs an async Stream for file upload, so we
/// spawn an extra thread to encode the .pxar data and pipe it to the
/// consumer.
pub struct PxarBackupStream {
rx: Option<std::sync::mpsc::Receiver<Result<Vec<u8>, Error>>>,
handle: Option<AbortHandle>,
error: Arc<Mutex<Option<String>>>,
}
impl Drop for PxarBackupStream {
fn drop(&mut self) {
self.rx = None;
self.handle.take().unwrap().abort();
}
}
impl PxarBackupStream {
pub fn new<W: Write + Send + 'static>(
dir: Dir,
catalog: Arc<Mutex<CatalogWriter<W>>>,
options: crate::pxar::PxarCreateOptions,
) -> Result<Self, Error> {
let (tx, rx) = std::sync::mpsc::sync_channel(10);
let buffer_size = 256 * 1024;
let error = Arc::new(Mutex::new(None));
let error2 = Arc::clone(&error);
let handler = async move {
let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
buffer_size,
StdChannelWriter::new(tx),
));
let verbose = options.verbose;
let writer = pxar::encoder::sync::StandardWriter::new(writer);
if let Err(err) = crate::pxar::create_archive(
dir,
writer,
crate::pxar::Flags::DEFAULT,
move |path| {
if verbose {
println!("{:?}", path);
}
Ok(())
},
Some(catalog),
options,
).await {
let mut error = error2.lock().unwrap();
*error = Some(err.to_string());
}
};
let (handle, registration) = AbortHandle::new_pair();
let future = Abortable::new(handler, registration);
tokio::spawn(future);
Ok(Self {
rx: Some(rx),
handle: Some(handle),
error,
})
}
pub fn open<W: Write + Send + 'static>(
dirname: &Path,
catalog: Arc<Mutex<CatalogWriter<W>>>,
options: crate::pxar::PxarCreateOptions,
) -> Result<Self, Error> {
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
Self::new(
dir,
catalog,
options,
)
}
}
impl Stream for PxarBackupStream {
type Item = Result<Vec<u8>, Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
{
// limit lock scope
let error = self.error.lock().unwrap();
if let Some(ref msg) = *error {
return Poll::Ready(Some(Err(format_err!("{}", msg))));
}
}
match pbs_runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
Ok(data) => Poll::Ready(Some(data)),
Err(_) => {
let error = self.error.lock().unwrap();
if let Some(ref msg) = *error {
return Poll::Ready(Some(Err(format_err!("{}", msg))));
}
Poll::Ready(None) // channel closed, no error
}
}
}
}
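// A sketch of draining the stream, under stated assumptions: `catalog` and
// `options` come from the caller, and "/etc" is an illustrative source
// directory.
#[allow(dead_code)]
async fn pxar_stream_sketch<W: Write + Send + 'static>(
    catalog: Arc<Mutex<CatalogWriter<W>>>,
    options: crate::pxar::PxarCreateOptions,
) -> Result<(), Error> {
    use futures::StreamExt;

    let mut stream = PxarBackupStream::open(Path::new("/etc"), catalog, options)?;
    while let Some(chunk) = stream.next().await {
        // each item is an encoded slice of the growing pxar archive
        let _bytes: Vec<u8> = chunk?;
    }
    Ok(())
}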

View File

@ -1,125 +0,0 @@
use std::future::Future;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
use pbs_datastore::{CryptConfig, CryptMode};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::read_chunk::ReadChunk;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_runtime::block_on;
use super::BackupReader;
/// Read chunks from remote host using ``BackupReader``
#[derive(Clone)]
pub struct RemoteChunkReader {
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
cache_hint: Arc<HashMap<[u8; 32], usize>>,
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
}
impl RemoteChunkReader {
/// Create a new instance.
///
/// Chunks listed in ``cache_hint`` are cached and kept in RAM.
pub fn new(
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
cache_hint: HashMap<[u8; 32], usize>,
) -> Self {
Self {
client,
crypt_config,
crypt_mode,
cache_hint: Arc::new(cache_hint),
cache: Arc::new(Mutex::new(HashMap::new())),
}
}
/// Downloads a raw chunk. This only verifies the (untrusted) CRC32; use
/// DataBlob::verify_unencrypted or DataBlob::decode before storing or processing it further.
pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
self.client
.download_chunk(&digest, &mut chunk_data)
.await?;
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
match self.crypt_mode {
CryptMode::Encrypt => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => Ok(chunk),
CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
}
},
CryptMode::SignOnly | CryptMode::None => {
match chunk.crypt_mode()? {
CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
CryptMode::SignOnly | CryptMode::None => Ok(chunk),
}
},
}
}
}
impl ReadChunk for RemoteChunkReader {
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
block_on(Self::read_raw_chunk(self, digest))
}
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
return Ok(raw_data.to_vec());
}
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
let use_cache = self.cache_hint.contains_key(digest);
if use_cache {
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
}
Ok(raw_data)
}
}
impl AsyncReadChunk for RemoteChunkReader {
fn read_raw_chunk<'a>(
&'a self,
digest: &'a [u8; 32],
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
Box::pin(Self::read_raw_chunk(self, digest))
}
fn read_chunk<'a>(
&'a self,
digest: &'a [u8; 32],
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
Box::pin(async move {
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
return Ok(raw_data.to_vec());
}
let chunk = Self::read_raw_chunk(self, digest).await?;
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
let use_cache = self.cache_hint.contains_key(digest);
if use_cache {
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
}
Ok(raw_data)
})
}
}
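// A sketch of synchronous chunk access; `reader` and `digest` are assumptions
// supplied by the caller. UFCS is used because both `ReadChunk` and
// `AsyncReadChunk` define a `read_chunk` method.
#[allow(dead_code)]
fn remote_chunk_sketch(reader: Arc<BackupReader>, digest: [u8; 32]) -> Result<Vec<u8>, Error> {
    let chunk_reader = RemoteChunkReader::new(
        reader,
        None, // no crypt config: plain or signed-only chunks
        CryptMode::None,
        HashMap::new(), // no cache hints
    );
    // downloads, CRC-checks and decodes the chunk
    ReadChunk::read_chunk(&chunk_reader, &digest)
}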

View File

@ -1,117 +0,0 @@
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
use anyhow::{bail, Error};
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use futures::*;
use proxmox::api::cli::format_and_print_result;
use pbs_tools::percent_encoding::percent_encode_component;
use super::HttpClient;
/// Display task log on console
///
/// This polls the task API and prints the log to the console. It also
/// catches interrupt signals and sends an abort request to the task if
/// the user presses CTRL-C. Two interrupts cause an immediate end of
/// the loop. The task may still run in that case.
pub async fn display_task_log(
client: &mut HttpClient,
upid_str: &str,
strip_date: bool,
) -> Result<(), Error> {
let mut signal_stream = signal(SignalKind::interrupt())?;
let abort_count = Arc::new(AtomicUsize::new(0));
let abort_count2 = Arc::clone(&abort_count);
let abort_future = async move {
while signal_stream.recv().await.is_some() {
println!("got shutdown request (SIGINT)");
let prev_count = abort_count2.fetch_add(1, Ordering::SeqCst);
if prev_count >= 1 {
println!("forced exit (task still running)");
break;
}
}
Ok::<_, Error>(())
};
let request_future = async move {
let mut start = 1;
let limit = 500;
loop {
let abort = abort_count.load(Ordering::Relaxed);
if abort > 0 {
let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
let _ = client.delete(&path, None).await?;
}
let param = json!({ "start": start, "limit": limit, "test-status": true });
let path = format!("api2/json/nodes/localhost/tasks/{}/log", percent_encode_component(upid_str));
let result = client.get(&path, Some(param)).await?;
let active = result["active"].as_bool().unwrap();
let total = result["total"].as_u64().unwrap();
let data = result["data"].as_array().unwrap();
let lines = data.len();
for item in data {
let n = item["n"].as_u64().unwrap();
let t = item["t"].as_str().unwrap();
if n != start { bail!("got wrong line number in response data ({} != {})", n, start); }
if strip_date && t.len() > 27 && &t[25..27] == ": " {
let line = &t[27..];
println!("{}", line);
} else {
println!("{}", t);
}
start += 1;
}
if start > total {
if active {
tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
} else {
break;
}
} else if lines != limit {
bail!("got wrong number of lines from server ({} != {})", lines, limit);
}
}
Ok(())
};
futures::select!{
request = request_future.fuse() => request?,
abort = abort_future.fuse() => abort?,
};
Ok(())
}
/// Display task result (upid), or view task log - depending on output format
pub async fn view_task_result(
client: &mut HttpClient,
result: Value,
output_format: &str,
) -> Result<(), Error> {
let data = &result["data"];
if output_format == "text" {
if let Some(upid) = data.as_str() {
display_task_log(client, upid, true).await?;
}
} else {
format_and_print_result(&data, &output_format);
}
Ok(())
}
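// A sketch of the typical CLI call site: trigger an API call that returns a
// task UPID, then either print it or follow the log depending on the output
// format. The datastore name "store1" is an illustrative placeholder.
#[allow(dead_code)]
async fn start_gc_sketch(client: &mut HttpClient, output_format: &str) -> Result<(), Error> {
    let result = client
        .post("api2/json/admin/datastore/store1/gc", None)
        .await?;
    view_task_result(client, result, output_format).await
}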

View File

@ -1,256 +0,0 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{bail, format_err, Error};
use futures::*;
use http::Uri;
use http::{Request, Response};
use hyper::client::connect::{Connected, Connection};
use hyper::client::Client;
use hyper::Body;
use pin_project::pin_project;
use serde_json::Value;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio::net::UnixStream;
use proxmox::api::error::HttpError;
pub const DEFAULT_VSOCK_PORT: u16 = 807;
#[derive(Clone)]
struct VsockConnector;
#[pin_project]
/// Wrapper around UnixStream so we can implement hyper::client::connect::Connection
struct UnixConnection {
#[pin]
stream: UnixStream,
}
impl tower_service::Service<Uri> for VsockConnector {
type Response = UnixConnection;
type Error = Error;
type Future = Pin<Box<dyn Future<Output = Result<UnixConnection, Error>> + Send>>;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, dst: Uri) -> Self::Future {
use nix::sys::socket::*;
use std::os::unix::io::FromRawFd;
// connect can block, so run it in a blocking task (though in practice it seems
// to return immediately with either ENODEV or ETIMEDOUT in case of error)
tokio::task::spawn_blocking(move || {
if dst.scheme_str().unwrap_or_default() != "vsock" {
bail!("invalid URI (scheme) for vsock connector: {}", dst);
}
let cid = match dst.host() {
Some(host) => host.parse().map_err(|err| {
format_err!(
"invalid URI (host not a number) for vsock connector: {} ({})",
dst,
err
)
})?,
None => bail!("invalid URI (no host) for vsock connector: {}", dst),
};
let port = match dst.port_u16() {
Some(port) => port,
None => bail!("invalid URI (bad port) for vsock connector: {}", dst),
};
let sock_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::empty(),
None,
)?;
let sock_addr = VsockAddr::new(cid, port as u32);
connect(sock_fd, &SockAddr::Vsock(sock_addr))?;
// connect sync, but set nonblock after (tokio requires it)
let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };
std_stream.set_nonblocking(true)?;
let stream = tokio::net::UnixStream::from_std(std_stream)?;
let connection = UnixConnection { stream };
Ok(connection)
})
// unravel the thread JoinHandle to a usable future
.map(|res| match res {
Ok(res) => res,
Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
})
.boxed()
}
}
impl Connection for UnixConnection {
fn connected(&self) -> Connected {
Connected::new()
}
}
impl AsyncRead for UnixConnection {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf,
) -> Poll<Result<(), std::io::Error>> {
let this = self.project();
this.stream.poll_read(cx, buf)
}
}
impl AsyncWrite for UnixConnection {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<tokio::io::Result<usize>> {
let this = self.project();
this.stream.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
let this = self.project();
this.stream.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<tokio::io::Result<()>> {
let this = self.project();
this.stream.poll_shutdown(cx)
}
}
/// Slimmed down version of HttpClient for virtio-vsock connections (file restore daemon)
pub struct VsockClient {
client: Client<VsockConnector>,
cid: i32,
port: u16,
auth: Option<String>,
}
impl VsockClient {
pub fn new(cid: i32, port: u16, auth: Option<String>) -> Self {
let conn = VsockConnector {};
let client = Client::builder().build::<_, Body>(conn);
Self {
client,
cid,
port,
auth,
}
}
pub async fn get(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = self.request_builder("GET", path, data)?;
self.api_request(req).await
}
pub async fn post(&self, path: &str, data: Option<Value>) -> Result<Value, Error> {
let req = self.request_builder("POST", path, data)?;
self.api_request(req).await
}
pub async fn download(
&self,
path: &str,
data: Option<Value>,
output: &mut (dyn AsyncWrite + Send + Unpin),
) -> Result<(), Error> {
let req = self.request_builder("GET", path, data)?;
let client = self.client.clone();
let resp = client
.request(req)
.await
.map_err(|_| format_err!("vsock download request timed out"))?;
let status = resp.status();
if !status.is_success() {
Self::api_response(resp).await.map(|_| ())?
} else {
resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk).await?;
Ok::<_, Error>(acc)
})
.await?;
}
Ok(())
}
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status();
let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec())?;
if status.is_success() {
if text.is_empty() {
Ok(Value::Null)
} else {
let value: Value = serde_json::from_str(&text)?;
Ok(value)
}
} else {
Err(Error::from(HttpError::new(status, text)))
}
}
async fn api_request(&self, req: Request<Body>) -> Result<Value, Error> {
self.client
.request(req)
.map_err(Error::from)
.and_then(Self::api_response)
.await
}
fn request_builder(
&self,
method: &str,
path: &str,
data: Option<Value>,
) -> Result<Request<Body>, Error> {
let path = path.trim_matches('/');
let url: Uri = format!("vsock://{}:{}/{}", self.cid, self.port, path).parse()?;
let make_builder = |content_type: &str, url: &Uri| {
let mut builder = Request::builder()
.method(method)
.uri(url)
.header(hyper::header::CONTENT_TYPE, content_type);
if let Some(auth) = &self.auth {
builder = builder.header(hyper::header::AUTHORIZATION, auth);
}
builder
};
if let Some(data) = data {
if method == "POST" {
let builder = make_builder("application/json", &url);
let request = builder.body(Body::from(data.to_string()))?;
return Ok(request);
} else {
let query = pbs_tools::json::json_object_to_query(data)?;
let url: Uri =
format!("vsock://{}:{}/{}?{}", self.cid, self.port, path, query).parse()?;
let builder = make_builder("application/x-www-form-urlencoded", &url);
let request = builder.body(Body::empty())?;
return Ok(request);
}
}
let builder = make_builder("application/x-www-form-urlencoded", &url);
let request = builder.body(Body::empty())?;
Ok(request)
}
}
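// A sketch of talking to the restore VM daemon over vsock; the CID comes from
// the VM setup and "api2/json/status" is an illustrative path.
#[allow(dead_code)]
async fn vsock_sketch(cid: i32) -> Result<Value, Error> {
    let client = VsockClient::new(cid, DEFAULT_VSOCK_PORT, None /* no auth */);
    client.get("api2/json/status", None).await
}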

View File

@ -3,10 +3,8 @@ use std::sync::{Arc, RwLock};
use anyhow::{bail, Error};
use lazy_static::lazy_static;
use serde::{Serialize, Deserialize};
use proxmox::api::{
api,
schema::*,
section_config::{
SectionConfig,
@ -17,154 +15,18 @@ use proxmox::api::{
use proxmox::tools::{fs::replace_file, fs::CreateOptions};
use crate::api2::types::*;
use pbs_api_types::{Authid, Userid};
pub use pbs_api_types::{ApiToken, User};
pub use pbs_api_types::{
EMAIL_SCHEMA, ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
};
use crate::tools::Memcom;
lazy_static! {
pub static ref CONFIG: SectionConfig = init();
}
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
"Enable the account (default). You can set this to '0' to disable the account.")
.default(true)
.schema();
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
"Account expiration date (seconds since epoch). '0' means no expiration date.")
.default(0)
.minimum(0)
.schema();
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
#[api(
properties: {
tokenid: {
schema: PROXMOX_TOKEN_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
}
)]
#[derive(Serialize,Deserialize)]
/// ApiToken properties.
pub struct ApiToken {
pub tokenid: Authid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
}
impl ApiToken {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox::tools::time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize,Deserialize)]
/// User properties.
pub struct User {
pub userid: Userid,
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if="Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if="Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if="Option::is_none")]
pub email: Option<String>,
}
impl User {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox::tools::time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}
fn init() -> SectionConfig {
let mut config = SectionConfig::new(&Authid::API_SCHEMA);

View File

@ -9,8 +9,6 @@ pub mod tools;
#[macro_use]
pub mod server;
pub mod pxar;
#[macro_use]
pub mod backup;
@ -18,8 +16,6 @@ pub mod config;
pub mod api2;
pub mod client;
pub mod auth_helpers;
pub mod auth;

File diff suppressed because it is too large

View File

@ -1,162 +0,0 @@
use std::ffi::OsString;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::{mkdirat, Mode};
use proxmox::sys::error::SysError;
use proxmox::tools::fd::BorrowedFd;
use pxar::Metadata;
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
pub struct PxarDir {
file_name: OsString,
metadata: Metadata,
dir: Option<Dir>,
}
impl PxarDir {
pub fn new(file_name: OsString, metadata: Metadata) -> Self {
Self {
file_name,
metadata,
dir: None,
}
}
pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
Self {
file_name: OsString::from("."),
metadata,
dir: Some(dir),
}
}
fn create_dir(
&mut self,
parent: RawFd,
allow_existing_dirs: bool,
) -> Result<BorrowedFd, Error> {
match mkdirat(
parent,
self.file_name.as_os_str(),
perms_from_metadata(&self.metadata)?,
) {
Ok(()) => (),
Err(err) => {
if !(allow_existing_dirs && err.already_exists()) {
return Err(err.into());
}
}
}
self.open_dir(parent)
}
fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
let dir = Dir::openat(
parent,
self.file_name.as_os_str(),
OFlag::O_DIRECTORY,
Mode::empty(),
)?;
let fd = BorrowedFd::new(&dir);
self.dir = Some(dir);
Ok(fd)
}
pub fn try_as_borrowed_fd(&self) -> Option<BorrowedFd> {
self.dir.as_ref().map(BorrowedFd::new)
}
pub fn metadata(&self) -> &Metadata {
&self.metadata
}
}
pub struct PxarDirStack {
dirs: Vec<PxarDir>,
path: PathBuf,
created: usize,
}
impl PxarDirStack {
pub fn new(root: Dir, metadata: Metadata) -> Self {
Self {
dirs: vec![PxarDir::with_dir(root, metadata)],
path: PathBuf::from("/"),
created: 1, // the root directory exists
}
}
pub fn is_empty(&self) -> bool {
self.dirs.is_empty()
}
pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
assert_single_path_component(&file_name)?;
self.path.push(&file_name);
self.dirs.push(PxarDir::new(file_name, metadata));
Ok(())
}
pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
let out = self.dirs.pop();
if !self.path.pop() {
if self.path.as_os_str() == "/" {
// we just finished the root directory, make sure this can only happen once:
self.path = PathBuf::new();
} else {
bail!("lost track of path");
}
}
self.created = self.created.min(self.dirs.len());
Ok(out)
}
pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<BorrowedFd, Error> {
// should not be possible given the way we use it:
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
let dirs_len = self.dirs.len();
let mut fd = self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))?
.as_raw_fd();
while self.created < dirs_len {
fd = self.dirs[self.created]
.create_dir(fd, allow_existing_dirs)?
.as_raw_fd();
self.created += 1;
}
self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
}
pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
let _: BorrowedFd = self.last_dir_fd(allow_existing_dirs)?;
Ok(())
}
pub fn root_dir_fd(&self) -> Result<BorrowedFd, Error> {
// should not be possible given the way we use it:
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
self.dirs[0]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
}
pub fn path(&self) -> &Path {
&self.path
}
}
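// Usage sketch (illustrative only): directories are pushed lazily and only
// materialized on disk once `last_dir_fd` needs a file descriptor, so excluded
// or empty directories are never created:
//
//     let mut stack = PxarDirStack::new(root_dir, root_metadata);
//     stack.push(OsString::from("a"), meta_a)?; // not created yet
//     stack.push(OsString::from("b"), meta_b)?; // not created yet
//     let fd = stack.last_dir_fd(true)?;        // creates "a" and "b" now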

View File

@ -1,864 +0,0 @@
//! Code for extraction of pxar contents onto the file system.
use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr, OsString};
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::pin::Pin;
use futures::future::Future;
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pathpatterns::{MatchEntry, MatchList, MatchType};
use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
use pxar::decoder::aio::Decoder;
use pxar::format::Device;
use pxar::{Entry, EntryKind, Metadata};
use proxmox::c_result;
use proxmox::tools::{
fs::{create_path, CreateOptions},
io::{sparse_copy, sparse_copy_async},
};
use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::metadata;
use crate::pxar::Flags;
use crate::tools::zip::{ZipEncoder, ZipEntry};
pub struct PxarExtractOptions<'a> {
pub match_list: &'a[MatchEntry],
pub extract_match_default: bool,
pub allow_existing_dirs: bool,
pub on_error: Option<ErrorHandler>,
}
pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
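// Sketch of building the options (hypothetical; assumes the `pathpatterns` API
// with an additional `use pathpatterns::PatternFlag;` next to the imports above):
//
//     let match_list = vec![
//         MatchEntry::parse_pattern("etc/**", PatternFlag::PATH_NAME, MatchType::Include)?,
//     ];
//     let options = PxarExtractOptions {
//         match_list: &match_list,
//         extract_match_default: false, // only extract what the list matches
//         allow_existing_dirs: true,
//         on_error: None, // None: the first error aborts the extraction
//     };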
pub fn extract_archive<T, F>(
mut decoder: pxar::decoder::Decoder<T>,
destination: &Path,
feature_flags: Flags,
mut callback: F,
options: PxarExtractOptions,
) -> Result<(), Error>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
// we use this to keep track of our directory-traversal
decoder.enable_goodbye_entries(true);
let root = decoder
.next()
.ok_or_else(|| format_err!("found empty pxar archive"))?
.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
if !root.is_dir() {
bail!("pxar archive does not start with a directory entry!");
}
create_path(
&destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
let dir = Dir::open(
destination,
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
let mut extractor = Extractor::new(
dir,
root.metadata().clone(),
options.allow_existing_dirs,
feature_flags,
);
if let Some(on_error) = options.on_error {
extractor.on_error(on_error);
}
let mut match_stack = Vec::new();
let mut err_path_stack = vec![OsString::from("/")];
let mut current_match = options.extract_match_default;
while let Some(entry) = decoder.next() {
let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
let file_name_os = entry.file_name();
// safety check: a file entry in an archive must never contain slashes:
if file_name_os.as_bytes().contains(&b'/') {
bail!("archive file entry contains slashes, which is invalid and a security concern");
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
let metadata = entry.metadata();
extractor.set_path(entry.path().as_os_str().to_owned());
let match_result = options.match_list.matches(
entry.path().as_os_str().as_bytes(),
Some(metadata.file_type() as u32),
);
let did_match = match match_result {
Some(MatchType::Include) => true,
Some(MatchType::Exclude) => false,
None => current_match,
};
match (did_match, entry.kind()) {
(_, EntryKind::Directory) => {
callback(entry.path());
let create = current_match && match_result != Some(MatchType::Exclude);
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), create)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
// We're starting a new directory, push our old matching state and replace it with
// our new one:
match_stack.push(current_match);
current_match = did_match;
// When we hit the goodbye table we'll try to apply metadata to the directory, but
// the Goodbye entry will not contain the path, so push it to our path stack for
// error messages:
err_path_stack.push(extractor.clone_path());
Ok(())
}
(_, EntryKind::GoodbyeTable) => {
// go up a directory
extractor.set_path(err_path_stack.pop().ok_or_else(|| {
format_err!(
"error at entry {:?}: unexpected end of directory",
file_name_os
)
})?);
extractor
.leave_directory()
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
// We left a directory, also get back our previous matching state. This is in sync
// with `dir_stack` so this should never be empty except for the final goodbye
// table, in which case we get back to the default of `true`.
current_match = match_stack.pop().unwrap_or(true);
Ok(())
}
(true, EntryKind::Symlink(link)) => {
callback(entry.path());
extractor.extract_symlink(&file_name, metadata, link.as_ref())
}
(true, EntryKind::Hardlink(link)) => {
callback(entry.path());
extractor.extract_hardlink(&file_name, link.as_os_str())
}
(true, EntryKind::Device(dev)) => {
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
callback(entry.path());
extractor.extract_device(&file_name, metadata, dev)
} else {
Ok(())
}
}
(true, EntryKind::Fifo) => {
if extractor.contains_flags(Flags::WITH_FIFOS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
} else {
Ok(())
}
}
(true, EntryKind::Socket) => {
if extractor.contains_flags(Flags::WITH_SOCKETS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
} else {
Ok(())
}
}
(true, EntryKind::File { size, .. }) => extractor.extract_file(
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
),
(false, _) => Ok(()), // skip this
}
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
}
if !extractor.dir_stack.is_empty() {
bail!("unexpected eof while decoding pxar archive");
}
Ok(())
}
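// Call-site sketch (hypothetical; assumes `pxar::decoder::Decoder::from_std`
// for wrapping a blocking reader, as the `pxar` CLI does):
//
//     let reader = std::io::BufReader::new(std::fs::File::open("archive.pxar")?);
//     extract_archive(
//         pxar::decoder::Decoder::from_std(reader)?,
//         Path::new("/tmp/restore"),
//         Flags::DEFAULT,
//         |path| println!("{:?}", path),
//         options,
//     )?;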
/// Common state for file extraction.
pub(crate) struct Extractor {
feature_flags: Flags,
allow_existing_dirs: bool,
dir_stack: PxarDirStack,
/// For better error output we need to track the current path in the Extractor state.
current_path: Arc<Mutex<OsString>>,
/// Error callback. Includes `current_path` in the reformatted error, should return `Ok` to
/// continue extracting or the passed error as `Err` to bail out.
on_error: ErrorHandler,
}
impl Extractor {
/// Create a new extractor state for a target directory.
pub fn new(
root_dir: Dir,
metadata: Metadata,
allow_existing_dirs: bool,
feature_flags: Flags,
) -> Self {
Self {
dir_stack: PxarDirStack::new(root_dir, metadata),
allow_existing_dirs,
feature_flags,
current_path: Arc::new(Mutex::new(OsString::new())),
on_error: Box::new(Err),
}
}
/// We call this on errors. The error will be reformatted to include `current_path`. The
/// callback should decide whether this error was fatal (simply return it) to bail out early,
/// or log/remember/accumulate errors somewhere and return `Ok(())` in its place to continue
/// extracting.
pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
let path = Arc::clone(&self.current_path);
self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
});
}
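// Handler sketch: log every error and keep extracting instead of bailing out
// on the first failure:
//
//     extractor.on_error(Box::new(|err| {
//         eprintln!("non-fatal: {}", err);
//         Ok(()) // returning Ok(()) continues the extraction
//     }));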
pub fn set_path(&mut self, path: OsString) {
*self.current_path.lock().unwrap() = path;
}
pub fn clone_path(&self) -> OsString {
self.current_path.lock().unwrap().clone()
}
/// When encountering a directory during extraction, this is used to keep track of it. If
/// `create` is true it is immediately created and its metadata will be updated once we leave
/// it. If `create` is false it will only be created if it is going to have any actual content.
pub fn enter_directory(
&mut self,
file_name: OsString,
metadata: Metadata,
create: bool,
) -> Result<(), Error> {
self.dir_stack.push(file_name, metadata)?;
if create {
self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
}
Ok(())
}
/// When done with a directory we can apply its metadata if it has been created.
pub fn leave_directory(&mut self) -> Result<(), Error> {
let path_info = self.dir_stack.path().to_owned();
let dir = self
.dir_stack
.pop()
.map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
.ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
if let Some(fd) = dir.try_as_borrowed_fd() {
metadata::apply(
self.feature_flags,
dir.metadata(),
fd.as_raw_fd(),
&path_info,
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
}
Ok(())
}
fn contains_flags(&self, flag: Flags) -> bool {
self.feature_flags.contains(flag)
}
fn parent_fd(&mut self) -> Result<RawFd, Error> {
self.dir_stack
.last_dir_fd(self.allow_existing_dirs)
.map(|d| d.as_raw_fd())
.map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
}
pub fn extract_symlink(
&mut self,
file_name: &CStr,
metadata: &Metadata,
link: &OsStr,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
nix::unistd::symlinkat(link, Some(parent), file_name)?;
metadata::apply_at(
self.feature_flags,
metadata,
parent,
file_name,
self.dir_stack.path(),
&mut self.on_error,
)
}
pub fn extract_hardlink(&mut self, file_name: &CStr, link: &OsStr) -> Result<(), Error> {
crate::pxar::tools::assert_relative_path(link)?;
let parent = self.parent_fd()?;
let root = self.dir_stack.root_dir_fd()?;
let target = CString::new(link.as_bytes())?;
nix::unistd::linkat(
Some(root.as_raw_fd()),
target.as_c_str(),
Some(parent),
file_name,
nix::unistd::LinkatFlags::NoSymlinkFollow,
)?;
Ok(())
}
pub fn extract_device(
&mut self,
file_name: &CStr,
metadata: &Metadata,
device: &Device,
) -> Result<(), Error> {
self.extract_special(file_name, metadata, device.to_dev_t())
}
pub fn extract_special(
&mut self,
file_name: &CStr,
metadata: &Metadata,
device: libc::dev_t,
) -> Result<(), Error> {
let mode = metadata.stat.mode;
let mode = u32::try_from(mode).map_err(|_| {
format_err!(
"device node's mode contains illegal bits: 0x{:x} (0o{:o})",
mode,
mode,
)
})?;
let parent = self.parent_fd()?;
unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
.map_err(|err| format_err!("failed to create device node: {}", err))?;
metadata::apply_at(
self.feature_flags,
metadata,
parent,
file_name,
self.dir_stack.path(),
&mut self.on_error,
)
}
pub fn extract_file(
&mut self,
file_name: &CStr,
metadata: &Metadata,
size: u64,
contents: &mut dyn io::Read,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
let mut file = unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(
parent,
file_name,
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
Mode::from_bits(0o600).unwrap(),
)
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
)
};
metadata::apply_initial_flags(
self.feature_flags,
metadata,
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
let result = sparse_copy(&mut *contents, &mut file)
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
if size != result.written {
bail!(
"extracted {} bytes of a file of {} bytes",
result.written,
size
);
}
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
metadata::apply(
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&mut self.on_error,
)
}
pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
&mut self,
file_name: &CStr,
metadata: &Metadata,
size: u64,
contents: &mut T,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
let mut file = tokio::fs::File::from_std(unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(
parent,
file_name,
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
Mode::from_bits(0o600).unwrap(),
)
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
)
});
metadata::apply_initial_flags(
self.feature_flags,
metadata,
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
let result = sparse_copy_async(&mut *contents, &mut file)
.await
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
if size != result.written {
bail!(
"extracted {} bytes of a file of {} bytes",
result.written,
size
);
}
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(nix::Error::Sys(errno)) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
} {}
}
metadata::apply(
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&mut self.on_error,
)
}
}
pub async fn create_zip<T, W, P>(
output: W,
decoder: Accessor<T>,
path: P,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
P: AsRef<Path>,
{
let root = decoder.open_root().await?;
let file = root
.lookup(&path).await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
let mut prefix = PathBuf::new();
let mut components = file.entry().path().components();
components.next_back(); // discard the last component (the entry itself)
for comp in components {
prefix.push(comp);
}
let mut zipencoder = ZipEncoder::new(output);
let mut decoder = decoder;
recurse_files_zip(&mut zipencoder, &mut decoder, &prefix, file, verbose)
.await
.map_err(|err| {
eprintln!("error during creating of zip: {}", err);
err
})?;
zipencoder
.finish()
.await
.map_err(|err| {
eprintln!("error during finishing of zip: {}", err);
err
})
}
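// Caller sketch (hypothetical): any `AsyncWrite` works as output, e.g. a file:
//
//     let output = tokio::fs::File::create("/tmp/out.zip").await?;
//     create_zip(output, accessor, "some/sub/dir", true).await?;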
fn recurse_files_zip<'a, T, W>(
zip: &'a mut ZipEncoder<W>,
decoder: &'a mut Accessor<T>,
prefix: &'a Path,
file: FileEntry<T>,
verbose: bool,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
Box::pin(async move {
let metadata = file.entry().metadata();
let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
match file.kind() {
EntryKind::File { .. } => {
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(file.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Hardlink(_) => {
let realfile = decoder.follow_hardlink(&file).await?;
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
true,
);
zip.add_entry(entry, Some(realfile.contents().await?))
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
}
EntryKind::Directory => {
let dir = file.enter_directory().await?;
let mut readdir = dir.read_dir();
if verbose {
eprintln!("adding '{}' to zip", path.display());
}
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
metadata.stat.mode as u16,
false,
);
zip.add_entry::<FileContents<T>>(entry, None).await?;
while let Some(entry) = readdir.next().await {
let entry = entry?.decode_entry().await?;
recurse_files_zip(zip, decoder, prefix, entry, verbose).await?;
}
}
_ => {} // ignore all else
};
Ok(())
})
}
fn get_extractor<DEST>(destination: DEST, metadata: Metadata) -> Result<Extractor, Error>
where
DEST: AsRef<Path>,
{
create_path(
&destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| {
format_err!(
"error creating directory {:?}: {}",
destination.as_ref(),
err
)
})?;
let dir = Dir::open(
destination.as_ref(),
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| {
format_err!(
"unable to open target directory {:?}: {}",
destination.as_ref(),
err,
)
})?;
Ok(Extractor::new(dir, metadata, false, Flags::DEFAULT))
}
pub async fn extract_sub_dir<T, DEST, PATH>(
destination: DEST,
decoder: Accessor<T>,
path: PATH,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
DEST: AsRef<Path>,
PATH: AsRef<Path>,
{
let root = decoder.open_root().await?;
let mut extractor = get_extractor(
destination,
root.lookup_self().await?.entry().metadata().clone(),
)?;
let file = root
.lookup(&path)
.await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
recurse_files_extractor(&mut extractor, file, verbose).await
}
pub async fn extract_sub_dir_seq<S, DEST>(
destination: DEST,
mut decoder: Decoder<S>,
verbose: bool,
) -> Result<(), Error>
where
S: pxar::decoder::SeqRead + Unpin + Send + 'static,
DEST: AsRef<Path>,
{
decoder.enable_goodbye_entries(true);
let root = match decoder.next().await {
Some(Ok(root)) => root,
Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
None => bail!("cannot extract empty archive"),
};
let mut extractor = get_extractor(destination, root.metadata().clone())?;
if let Err(err) = seq_files_extractor(&mut extractor, decoder, verbose).await {
eprintln!("error extracting pxar archive: {}", err);
}
Ok(())
}
fn extract_special(
extractor: &mut Extractor,
entry: &Entry,
file_name: &CStr,
) -> Result<(), Error> {
let metadata = entry.metadata();
match entry.kind() {
EntryKind::Symlink(link) => {
extractor.extract_symlink(file_name, metadata, link.as_ref())?;
}
EntryKind::Hardlink(link) => {
extractor.extract_hardlink(file_name, link.as_os_str())?;
}
EntryKind::Device(dev) => {
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
extractor.extract_device(file_name, metadata, dev)?;
}
}
EntryKind::Fifo => {
if extractor.contains_flags(Flags::WITH_FIFOS) {
extractor.extract_special(file_name, metadata, 0)?;
}
}
EntryKind::Socket => {
if extractor.contains_flags(Flags::WITH_SOCKETS) {
extractor.extract_special(file_name, metadata, 0)?;
}
}
_ => bail!("extract_special used with unsupported entry kind"),
}
Ok(())
}
fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
let file_name_os = entry.file_name().to_owned();
// safety check: a file entry in an archive must never contain slashes:
if file_name_os.as_bytes().contains(&b'/') {
bail!("archive file entry contains slashes, which is invalid and a security concern");
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
Ok((file_name_os, file_name))
}
async fn recurse_files_extractor<'a, T>(
extractor: &'a mut Extractor,
file: FileEntry<T>,
verbose: bool,
) -> Result<(), Error>
where
T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
{
let entry = file.entry();
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(entry)?;
if verbose {
eprintln!("extracting: {}", file.path().display());
}
match file.kind() {
EntryKind::Directory => {
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
let dir = file.enter_directory().await?;
let mut seq_decoder = dir.decode_full().await?;
seq_decoder.enable_goodbye_entries(true);
seq_files_extractor(extractor, seq_decoder, verbose).await?;
extractor.leave_directory()?;
}
EntryKind::File { size, .. } => {
extractor
.async_extract_file(
&file_name,
metadata,
*size,
&mut file.contents().await.map_err(|_| {
format_err!("found regular file entry without contents in archive")
})?,
)
.await?
}
EntryKind::GoodbyeTable => {} // ignore
_ => extract_special(extractor, entry, &file_name)?,
}
Ok(())
}
async fn seq_files_extractor<'a, T>(
extractor: &'a mut Extractor,
mut decoder: pxar::decoder::aio::Decoder<T>,
verbose: bool,
) -> Result<(), Error>
where
T: pxar::decoder::SeqRead,
{
let mut dir_level = 0;
loop {
let entry = match decoder.next().await {
Some(entry) => entry?,
None => return Ok(()),
};
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(&entry)?;
if verbose && !matches!(entry.kind(), EntryKind::GoodbyeTable) {
eprintln!("extracting: {}", entry.path().display());
}
if let Err(err) = async {
match entry.kind() {
EntryKind::Directory => {
dir_level += 1;
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
}
EntryKind::File { size, .. } => {
extractor
.async_extract_file(
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
)
.await?
}
EntryKind::GoodbyeTable => {
dir_level -= 1;
extractor.leave_directory()?;
}
_ => extract_special(extractor, &entry, &file_name)?,
}
Ok(()) as Result<(), Error>
}
.await
{
let display = entry.path().display().to_string();
eprintln!(
"error extracting {}: {}",
if matches!(entry.kind(), EntryKind::GoodbyeTable) {
"<directory>"
} else {
&display
},
err
);
}
if dir_level < 0 {
// we've encountered one Goodbye more than Directory, meaning we've left the dir we
// started in - exit early, otherwise the extractor might panic
return Ok(());
}
}
}

View File

@ -1,378 +0,0 @@
//! Feature flags for *pxar* allow controlling what is stored in, and restored
//! from, the archive.
//! Flags for known supported features of a given filesystem can be derived
//! from the superblock's magic number.
use libc::c_long;
use bitflags::bitflags;
bitflags! {
pub struct Flags: u64 {
/// FAT-style 2s time granularity
const WITH_2SEC_TIME = 0x40;
/// Preserve read only flag of files
const WITH_READ_ONLY = 0x80;
/// Preserve unix permissions
const WITH_PERMISSIONS = 0x100;
/// Include symbolic links
const WITH_SYMLINKS = 0x200;
/// Include device nodes
const WITH_DEVICE_NODES = 0x400;
/// Include FIFOs
const WITH_FIFOS = 0x800;
/// Include Sockets
const WITH_SOCKETS = 0x1000;
/// Preserve DOS file flag `HIDDEN`
const WITH_FLAG_HIDDEN = 0x2000;
/// Preserve DOS file flag `SYSTEM`
const WITH_FLAG_SYSTEM = 0x4000;
/// Preserve DOS file flag `ARCHIVE`
const WITH_FLAG_ARCHIVE = 0x8000;
// chattr() flags
/// Linux file attribute `APPEND`
const WITH_FLAG_APPEND = 0x10000;
/// Linux file attribute `NOATIME`
const WITH_FLAG_NOATIME = 0x20000;
/// Linux file attribute `COMPR`
const WITH_FLAG_COMPR = 0x40000;
/// Linux file attribute `NOCOW`
const WITH_FLAG_NOCOW = 0x80000;
/// Linux file attribute `NODUMP`
const WITH_FLAG_NODUMP = 0x0010_0000;
/// Linux file attribute `DIRSYNC`
const WITH_FLAG_DIRSYNC = 0x0020_0000;
/// Linux file attribute `IMMUTABLE`
const WITH_FLAG_IMMUTABLE = 0x0040_0000;
/// Linux file attribute `SYNC`
const WITH_FLAG_SYNC = 0x0080_0000;
/// Linux file attribute `NOCOMP`
const WITH_FLAG_NOCOMP = 0x0100_0000;
/// Linux file attribute `PROJINHERIT`
const WITH_FLAG_PROJINHERIT = 0x0200_0000;
/// Preserve BTRFS subvolume flag
const WITH_SUBVOLUME = 0x0400_0000;
/// Preserve BTRFS read-only subvolume flag
const WITH_SUBVOLUME_RO = 0x0800_0000;
/// Preserve Extended Attribute metadata
const WITH_XATTRS = 0x1000_0000;
/// Preserve Access Control List metadata
const WITH_ACL = 0x2000_0000;
/// Preserve SELinux security context
const WITH_SELINUX = 0x4000_0000;
/// Preserve "security.capability" xattr
const WITH_FCAPS = 0x8000_0000;
/// Preserve XFS/ext4/ZFS project quota ID
const WITH_QUOTA_PROJID = 0x0001_0000_0000;
/// Support ".pxarexclude" files
const EXCLUDE_FILE = 0x1000_0000_0000_0000;
/// Exclude submounts
const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
/// Exclude entries with chattr flag NODUMP
const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;
// Definitions of typical feature flag sets for the *pxar* encoder/decoder.
// This way, expensive syscalls for unsupported features are avoided.
/// All chattr file attributes
const WITH_CHATTR =
Flags::WITH_FLAG_APPEND.bits() |
Flags::WITH_FLAG_NOATIME.bits() |
Flags::WITH_FLAG_COMPR.bits() |
Flags::WITH_FLAG_NOCOW.bits() |
Flags::WITH_FLAG_NODUMP.bits() |
Flags::WITH_FLAG_DIRSYNC.bits() |
Flags::WITH_FLAG_IMMUTABLE.bits() |
Flags::WITH_FLAG_SYNC.bits() |
Flags::WITH_FLAG_NOCOMP.bits() |
Flags::WITH_FLAG_PROJINHERIT.bits();
/// All FAT file attributes
const WITH_FAT_ATTRS =
Flags::WITH_FLAG_HIDDEN.bits() |
Flags::WITH_FLAG_SYSTEM.bits() |
Flags::WITH_FLAG_ARCHIVE.bits();
/// All bits that may also be exposed via fuse
const WITH_FUSE =
Flags::WITH_2SEC_TIME.bits() |
Flags::WITH_READ_ONLY.bits() |
Flags::WITH_PERMISSIONS.bits() |
Flags::WITH_SYMLINKS.bits() |
Flags::WITH_DEVICE_NODES.bits() |
Flags::WITH_FIFOS.bits() |
Flags::WITH_SOCKETS.bits() |
Flags::WITH_FAT_ATTRS.bits() |
Flags::WITH_CHATTR.bits() |
Flags::WITH_XATTRS.bits();
/// Default feature flags for encoder/decoder
const DEFAULT =
Flags::WITH_SYMLINKS.bits() |
Flags::WITH_DEVICE_NODES.bits() |
Flags::WITH_FIFOS.bits() |
Flags::WITH_SOCKETS.bits() |
Flags::WITH_FLAG_HIDDEN.bits() |
Flags::WITH_FLAG_SYSTEM.bits() |
Flags::WITH_FLAG_ARCHIVE.bits() |
Flags::WITH_FLAG_APPEND.bits() |
Flags::WITH_FLAG_NOATIME.bits() |
Flags::WITH_FLAG_COMPR.bits() |
Flags::WITH_FLAG_NOCOW.bits() |
//WITH_FLAG_NODUMP.bits() |
Flags::WITH_FLAG_DIRSYNC.bits() |
Flags::WITH_FLAG_IMMUTABLE.bits() |
Flags::WITH_FLAG_SYNC.bits() |
Flags::WITH_FLAG_NOCOMP.bits() |
Flags::WITH_FLAG_PROJINHERIT.bits() |
Flags::WITH_SUBVOLUME.bits() |
Flags::WITH_SUBVOLUME_RO.bits() |
Flags::WITH_XATTRS.bits() |
Flags::WITH_ACL.bits() |
Flags::WITH_SELINUX.bits() |
Flags::WITH_FCAPS.bits() |
Flags::WITH_QUOTA_PROJID.bits() |
Flags::EXCLUDE_NODUMP.bits() |
Flags::EXCLUDE_FILE.bits();
}
}
impl Default for Flags {
fn default() -> Flags {
Flags::DEFAULT
}
}
// from /usr/include/linux/fs.h
const FS_APPEND_FL: c_long = 0x0000_0020;
const FS_NOATIME_FL: c_long = 0x0000_0080;
const FS_COMPR_FL: c_long = 0x0000_0004;
const FS_NOCOW_FL: c_long = 0x0080_0000;
const FS_NODUMP_FL: c_long = 0x0000_0040;
const FS_DIRSYNC_FL: c_long = 0x0001_0000;
const FS_IMMUTABLE_FL: c_long = 0x0000_0010;
const FS_SYNC_FL: c_long = 0x0000_0008;
const FS_NOCOMP_FL: c_long = 0x0000_0400;
const FS_PROJINHERIT_FL: c_long = 0x2000_0000;
pub(crate) const INITIAL_FS_FLAGS: c_long =
FS_NOATIME_FL
| FS_COMPR_FL
| FS_NOCOW_FL
| FS_NOCOMP_FL
| FS_PROJINHERIT_FL;
#[rustfmt::skip]
const CHATTR_MAP: [(Flags, c_long); 10] = [
( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
( Flags::WITH_FLAG_NOCOW, FS_NOCOW_FL ),
( Flags::WITH_FLAG_NODUMP, FS_NODUMP_FL ),
( Flags::WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
( Flags::WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];
// from /usr/include/linux/msdos_fs.h
const ATTR_HIDDEN: u32 = 2;
const ATTR_SYS: u32 = 4;
const ATTR_ARCH: u32 = 32;
#[rustfmt::skip]
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
];
impl Flags {
/// Get a set of feature flags from file attributes.
pub fn from_chattr(attr: c_long) -> Flags {
let mut flags = Flags::empty();
for (fe_flag, fs_flag) in &CHATTR_MAP {
if (attr & fs_flag) != 0 {
flags |= *fe_flag;
}
}
flags
}
/// Get the chattr bit representation of these feature flags.
pub fn to_chattr(self) -> c_long {
let mut flags: c_long = 0;
for (fe_flag, fs_flag) in &CHATTR_MAP {
if self.contains(*fe_flag) {
flags |= *fs_flag;
}
}
flags
}
pub fn to_initial_chattr(self) -> c_long {
self.to_chattr() & INITIAL_FS_FLAGS
}
/// Get a set of feature flags from FAT attributes.
pub fn from_fat_attr(attr: u32) -> Flags {
let mut flags = Flags::empty();
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
if (attr & fs_flag) != 0 {
flags |= *fe_flag;
}
}
flags
}
/// Get the FAT attribute bit representation of these feature flags.
pub fn to_fat_attr(self) -> u32 {
let mut flags = 0u32;
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
if self.contains(*fe_flag) {
flags |= *fs_flag;
}
}
flags
}
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
pub fn from_magic(magic: i64) -> Flags {
use proxmox::sys::linux::magic::*;
match magic {
MSDOS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_FAT_ATTRS
},
EXT4_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
XFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
ZFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_FCAPS |
Flags::WITH_QUOTA_PROJID
},
BTRFS_SUPER_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_FLAG_APPEND |
Flags::WITH_FLAG_NOATIME |
Flags::WITH_FLAG_COMPR |
Flags::WITH_FLAG_NOCOW |
Flags::WITH_FLAG_NODUMP |
Flags::WITH_FLAG_DIRSYNC |
Flags::WITH_FLAG_IMMUTABLE |
Flags::WITH_FLAG_SYNC |
Flags::WITH_FLAG_NOCOMP |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_SELINUX |
Flags::WITH_SUBVOLUME |
Flags::WITH_SUBVOLUME_RO |
Flags::WITH_FCAPS
},
TMPFS_MAGIC => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_ACL |
Flags::WITH_SELINUX
},
// FUSE mounts are special as the supported feature set
// is not clear a priori.
FUSE_SUPER_MAGIC => {
Flags::WITH_FUSE
},
_ => {
Flags::WITH_2SEC_TIME |
Flags::WITH_READ_ONLY |
Flags::WITH_PERMISSIONS |
Flags::WITH_SYMLINKS |
Flags::WITH_DEVICE_NODES |
Flags::WITH_FIFOS |
Flags::WITH_SOCKETS |
Flags::WITH_XATTRS |
Flags::WITH_ACL |
Flags::WITH_FCAPS
},
}
}
}
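// A small sketch (hypothetical test) of the chattr mapping: feature flags and
// FS_*_FL bits round-trip through CHATTR_MAP in both directions.
#[cfg(test)]
mod chattr_map_tests {
    use super::*;

    #[test]
    fn chattr_round_trip() {
        let flags = Flags::WITH_FLAG_APPEND | Flags::WITH_FLAG_IMMUTABLE;
        let attr = flags.to_chattr();
        assert_eq!(attr, FS_APPEND_FL | FS_IMMUTABLE_FL);
        assert_eq!(Flags::from_chattr(attr), flags);
    }
}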

View File

@ -1,690 +0,0 @@
//! Asynchronous fuse implementation.
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ffi::{OsStr, OsString};
use std::future::Future;
use std::io;
use std::mem;
use std::ops::Range;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use futures::channel::mpsc::UnboundedSender;
use futures::select;
use futures::sink::SinkExt;
use futures::stream::{StreamExt, TryStreamExt};
use proxmox::tools::vec;
use pxar::accessor::{self, EntryRangeInfo, ReadAt};
use proxmox_fuse::requests::{self, FuseRequest};
use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID};
use crate::tools::xattr;
/// We mark inodes of non-directory entries this way so we know how to access them.
const NON_DIRECTORY_INODE: u64 = 1u64 << 63;
#[inline]
fn is_dir_inode(inode: u64) -> bool {
0 == (inode & NON_DIRECTORY_INODE)
}
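// Sketch of the inode scheme (hypothetical test): directories use the end offset
// of their entry range as inode, everything else the start offset with bit 63 set
// (see `to_inode` below).
#[cfg(test)]
mod inode_scheme_tests {
    use super::*;

    #[test]
    fn bit_63_marks_non_directories() {
        assert!(is_dir_inode(ROOT_ID));
        assert!(is_dir_inode(12345));
        assert!(!is_dir_inode(12345 | NON_DIRECTORY_INODE));
    }
}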
/// Our reader type instance used for accessors.
pub type Reader = Arc<dyn ReadAt + Send + Sync + 'static>;
/// Our Accessor type instance.
pub type Accessor = accessor::aio::Accessor<Reader>;
/// Our Directory type instance.
pub type Directory = accessor::aio::Directory<Reader>;
/// Our FileEntry type instance.
pub type FileEntry = accessor::aio::FileEntry<Reader>;
/// Our FileContents type instance.
pub type FileContents = accessor::aio::FileContents<Reader>;
pub struct Session {
fut: Pin<Box<dyn Future<Output = Result<(), Error>> + Send + Sync + 'static>>,
}
impl Session {
/// Create a fuse session for an archive.
pub async fn mount_path(
archive_path: &Path,
options: &OsStr,
verbose: bool,
mountpoint: &Path,
) -> Result<Self, Error> {
// TODO: Add a buffered/caching ReadAt layer?
let file = std::fs::File::open(archive_path)?;
let file_size = file.metadata()?.len();
let reader: Reader = Arc::new(accessor::sync::FileReader::new(file));
let accessor = Accessor::new(reader, file_size).await?;
Self::mount(accessor, options, verbose, mountpoint)
}
/// Create a new fuse session for the given pxar `Accessor`.
pub fn mount(
accessor: Accessor,
options: &OsStr,
verbose: bool,
path: &Path,
) -> Result<Self, Error> {
let fuse = Fuse::builder("pxar-mount")?
.debug()
.options_os(options)?
.enable_readdirplus()
.enable_read()
.enable_readlink()
.enable_read_xattr()
.build()?
.mount(path)?;
let session = SessionImpl::new(accessor, verbose);
Ok(Self {
fut: Box::pin(session.main(fuse)),
})
}
}
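// Mount sketch (hypothetical): `Session` is itself a future driving the fuse main
// loop, so after mounting it is simply awaited until unmount or error:
//
//     let session = Session::mount_path(
//         Path::new("archive.pxar"),
//         OsStr::new("ro"),
//         false,
//         Path::new("/mnt/pxar"),
//     )
//     .await?;
//     session.await?;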
impl Future for Session {
type Output = Result<(), Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
Pin::new(&mut self.fut).poll(cx)
}
}
/// We use this to return an errno value back to the kernel.
macro_rules! io_return {
($errno:expr) => {
return Err(::std::io::Error::from_raw_os_error($errno).into());
};
}
/// Format an "other" error, see `io_bail` below for details.
macro_rules! io_format_err {
($($fmt:tt)*) => {
::std::io::Error::new(::std::io::ErrorKind::Other, format!($($fmt)*))
}
}
/// We use this to bail out of a function in an unexpected error case. This will cause the fuse
/// request to be answered with a generic `EIO` error code. The error message contained in here
/// will be printed to stderr if the verbose flag is used, otherwise silently dropped.
macro_rules! io_bail {
($($fmt:tt)*) => { return Err(io_format_err!($($fmt)*).into()); }
}
/// This is what we need to cache as a "lookup" entry. The kernel assumes that these are easily
/// accessed.
struct Lookup {
refs: AtomicUsize,
inode: u64,
parent: u64,
entry_range_info: EntryRangeInfo,
content_range: Option<Range<u64>>,
}
impl Lookup {
fn new(
inode: u64,
parent: u64,
entry_range_info: EntryRangeInfo,
content_range: Option<Range<u64>>,
) -> Box<Lookup> {
Box::new(Self {
refs: AtomicUsize::new(1),
inode,
parent,
entry_range_info,
content_range,
})
}
/// Decrease the reference count by `count`. Note that this must not include the reference held
/// by `self` itself, so this must not decrease the count below 2.
fn forget(&self, count: usize) -> Result<(), Error> {
loop {
let old = self.refs.load(Ordering::Acquire);
if count >= old {
io_bail!("reference count underflow");
}
let new = old - count;
match self
.refs
.compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst)
{
Ok(_) => break Ok(()),
Err(_) => continue,
}
}
}
fn get_ref<'a>(&self, session: &'a SessionImpl) -> LookupRef<'a> {
if self.refs.fetch_add(1, Ordering::AcqRel) == 0 {
panic!("atomic refcount increased from 0 to 1");
}
LookupRef {
session,
lookup: self as *const Lookup,
}
}
}
struct LookupRef<'a> {
session: &'a SessionImpl,
lookup: *const Lookup,
}
unsafe impl<'a> Send for LookupRef<'a> {}
unsafe impl<'a> Sync for LookupRef<'a> {}
impl<'a> Clone for LookupRef<'a> {
fn clone(&self) -> Self {
self.get_ref(self.session)
}
}
impl<'a> std::ops::Deref for LookupRef<'a> {
type Target = Lookup;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lookup }
}
}
impl<'a> Drop for LookupRef<'a> {
fn drop(&mut self) {
if self.lookup.is_null() {
return;
}
if self.refs.fetch_sub(1, Ordering::AcqRel) == 1 {
let inode = self.inode;
drop(self.session.lookups.write().unwrap().remove(&inode));
}
}
}
impl<'a> LookupRef<'a> {
fn leak(mut self) -> &'a Lookup {
unsafe { &*mem::replace(&mut self.lookup, std::ptr::null()) }
}
}
struct SessionImpl {
accessor: Accessor,
verbose: bool,
lookups: RwLock<BTreeMap<u64, Box<Lookup>>>,
}
impl SessionImpl {
fn new(accessor: Accessor, verbose: bool) -> Self {
let root = Lookup::new(
ROOT_ID,
ROOT_ID,
EntryRangeInfo::toplevel(0..accessor.size()),
None,
);
let mut tree = BTreeMap::new();
tree.insert(ROOT_ID, root);
Self {
accessor,
verbose,
lookups: RwLock::new(tree),
}
}
/// Here's how we deal with errors:
///
/// Any error will be printed if the verbose flag was set, otherwise the message will be
/// silently dropped.
///
/// Opaque errors will cause the fuse main loop to bail out with that error.
///
/// `io::Error`s will cause the fuse request to be responded to with the given `io::Error`. An
/// `io::ErrorKind::Other` translates to a generic `EIO`.
async fn handle_err(
&self,
request: impl FuseRequest,
err: Error,
mut sender: UnboundedSender<Error>,
) {
let final_result = match err.downcast::<io::Error>() {
Ok(err) => {
if err.kind() == io::ErrorKind::Other && self.verbose {
eprintln!("an IO error occurred: {}", err);
}
// fail the request
request.io_fail(err).map_err(Error::from)
}
Err(err) => {
// `bail` (non-`io::Error`) is used for fatal errors which should actually cancel:
if self.verbose {
eprintln!("internal error: {}, bailing out", err);
}
Err(err)
}
};
if let Err(err) = final_result {
// either we failed to send the error code to fuse, or the above was not an
// `io::Error`, so in this case notify the main loop:
sender
.send(err)
.await
.expect("failed to propagate error to main loop");
}
}
async fn main(self, fuse: Fuse) -> Result<(), Error> {
Arc::new(self).main_do(fuse).await
}
async fn main_do(self: Arc<Self>, fuse: Fuse) -> Result<(), Error> {
let (err_send, mut err_recv) = futures::channel::mpsc::unbounded::<Error>();
let mut fuse = fuse.fuse(); // make this a futures::stream::FusedStream!
loop {
select! {
request = fuse.try_next() => match request? {
Some(request) => {
tokio::spawn(Arc::clone(&self).handle_request(request, err_send.clone()));
}
None => break,
},
err = err_recv.next() => match err {
Some(err) => if self.verbose {
eprintln!("cancelling fuse main loop due to error: {}", err);
return Err(err);
},
None => panic!("error channel was closed unexpectedly"),
},
}
}
Ok(())
}
async fn handle_request(
self: Arc<Self>,
request: Request,
mut err_sender: UnboundedSender<Error>,
) {
let result: Result<(), Error> = match request {
Request::Lookup(request) => {
match self.lookup(request.parent, &request.file_name).await {
Ok((entry, lookup)) => match request.reply(&entry) {
Ok(()) => {
lookup.leak();
Ok(())
}
Err(err) => Err(Error::from(err)),
},
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::Forget(request) => match self.forget(request.inode, request.count as usize) {
Ok(()) => {
request.reply();
Ok(())
}
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::Getattr(request) => match self.getattr(request.inode).await {
Ok(stat) => request.reply(&stat, std::f64::MAX).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ReaddirPlus(mut request) => match self.readdirplus(&mut request).await {
Ok(lookups) => match request.reply() {
Ok(()) => {
for i in lookups {
i.leak();
}
Ok(())
}
Err(err) => Err(Error::from(err)),
},
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::Read(request) => {
match self.read(request.inode, request.size, request.offset).await {
Ok(data) => request.reply(&data).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::Readlink(request) => match self.readlink(request.inode).await {
Ok(data) => request.reply(&data).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ListXAttrSize(request) => match self.listxattrs(request.inode).await {
Ok(data) => request
.reply(
data.into_iter()
.fold(0, |sum, i| sum + i.name().to_bytes_with_nul().len()),
)
.map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::ListXAttr(mut request) => match self.listxattrs_into(&mut request).await {
Ok(ReplyBufState::Ok) => request.reply().map_err(Error::from),
Ok(ReplyBufState::Full) => request.fail_full().map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
},
Request::GetXAttrSize(request) => {
match self.getxattr(request.inode, &request.attr_name).await {
Ok(xattr) => request.reply(xattr.value().len()).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
Request::GetXAttr(request) => {
match self.getxattr(request.inode, &request.attr_name).await {
Ok(xattr) => request.reply(xattr.value()).map_err(Error::from),
Err(err) => return self.handle_err(request, err, err_sender).await,
}
}
other => {
if self.verbose {
eprintln!("Received unexpected fuse request");
}
other.fail(libc::ENOSYS).map_err(Error::from)
}
};
if let Err(err) = result {
err_sender
.send(err)
.await
.expect("failed to propagate error to main loop");
}
}
fn get_lookup(&self, inode: u64) -> Result<LookupRef, Error> {
let lookups = self.lookups.read().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
io_return!(libc::ENOENT);
}
async fn open_dir(&self, inode: u64) -> Result<Directory, Error> {
if inode == ROOT_ID {
Ok(self.accessor.open_root().await?)
} else if !is_dir_inode(inode) {
io_return!(libc::ENOTDIR);
} else {
Ok(unsafe { self.accessor.open_dir_at_end(inode).await? })
}
}
async fn open_entry(&self, lookup: &LookupRef<'_>) -> io::Result<FileEntry> {
unsafe {
self.accessor
.open_file_at_range(&lookup.entry_range_info)
.await
}
}
fn open_content(&self, lookup: &LookupRef) -> Result<FileContents, Error> {
if is_dir_inode(lookup.inode) {
io_return!(libc::EISDIR);
}
match lookup.content_range.clone() {
Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }),
None => io_return!(libc::EBADF),
}
}
fn make_lookup(&self, parent: u64, inode: u64, entry: &FileEntry) -> Result<LookupRef, Error> {
let lookups = self.lookups.read().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
drop(lookups);
let entry = Lookup::new(
inode,
parent,
entry.entry_range_info().clone(),
entry.content_range()?,
);
let reference = entry.get_ref(self);
entry.refs.store(1, Ordering::Release);
let mut lookups = self.lookups.write().unwrap();
if let Some(lookup) = lookups.get(&inode) {
return Ok(lookup.get_ref(self));
}
lookups.insert(inode, entry);
drop(lookups);
Ok(reference)
}
fn forget(&self, inode: u64, count: usize) -> Result<(), Error> {
let node = self.get_lookup(inode)?;
node.forget(count)?;
Ok(())
}
async fn lookup(
&'_ self,
parent: u64,
file_name: &OsStr,
) -> Result<(EntryParam, LookupRef<'_>), Error> {
let dir = self.open_dir(parent).await?;
let entry = match { dir }.lookup(file_name).await? {
Some(entry) => entry,
None => io_return!(libc::ENOENT),
};
let entry = if let pxar::EntryKind::Hardlink(_) = entry.kind() {
// we don't know the file's end-offset, so we'll just allow the decoder to decode the
// entire rest of the archive until we figure out something better...
let entry = self.accessor.follow_hardlink(&entry).await?;
if let pxar::EntryKind::Hardlink(_) = entry.kind() {
// hardlinks must not point to other hardlinks...
io_return!(libc::ELOOP);
}
entry
} else {
entry
};
let response = to_entry(&entry)?;
let inode = response.inode;
Ok((response, self.make_lookup(parent, inode, &entry)?))
}
async fn getattr(&self, inode: u64) -> Result<libc::stat, Error> {
let entry = unsafe {
self.accessor.open_file_at_range(&self.get_lookup(inode)?.entry_range_info).await?
};
to_stat(inode, &entry)
}
async fn readdirplus(
&'_ self,
request: &mut requests::ReaddirPlus,
) -> Result<Vec<LookupRef<'_>>, Error> {
let mut lookups = Vec::new();
let offset = usize::try_from(request.offset)
.map_err(|_| io_format_err!("directory offset out of range"))?;
let dir = self.open_dir(request.inode).await?;
let dir_lookup = self.get_lookup(request.inode)?;
let entry_count = dir.read_dir().count() as isize;
let mut next = offset as isize;
let mut iter = dir.read_dir().skip(offset);
while let Some(file) = iter.next().await {
next += 1;
let file = file?.decode_entry().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = file.file_name();
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
}
if next == entry_count {
next += 1;
let file = dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new(".");
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(LookupRef::clone(&dir_lookup));
}
if next == entry_count + 1 {
next += 1;
let lookup = self.get_lookup(dir_lookup.parent)?;
let parent_dir = self.open_dir(lookup.inode).await?;
let file = parent_dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new("..");
match request.add_entry(name, &stat, next, 1, std::f64::MAX, std::f64::MAX)? {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(lookups),
}
lookups.push(lookup);
}
Ok(lookups)
}
async fn read(&self, inode: u64, len: usize, offset: u64) -> Result<Vec<u8>, Error> {
let file = self.get_lookup(inode)?;
let content = self.open_content(&file)?;
let mut buf = vec::undefined(len);
let got = content.read_at(&mut buf, offset).await?;
buf.truncate(got);
Ok(buf)
}
async fn readlink(&self, inode: u64) -> Result<OsString, Error> {
let lookup = self.get_lookup(inode)?;
let file = self.open_entry(&lookup).await?;
match file.get_symlink() {
None => io_return!(libc::EINVAL),
Some(link) => Ok(link.to_owned()),
}
}
async fn listxattrs(&self, inode: u64) -> Result<Vec<pxar::format::XAttr>, Error> {
let lookup = self.get_lookup(inode)?;
let metadata = self
.open_entry(&lookup)
.await?
.into_entry()
.into_metadata();
let mut xattrs = metadata.xattrs;
use pxar::format::XAttr;
if let Some(fcaps) = metadata.fcaps {
xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data));
}
// TODO: Special cases:
// b"system.posix_acl_access
// b"system.posix_acl_default
//
// For these we need to be able to create posix acl format entries, at that point we could
// just ditch libacl as well...
Ok(xattrs)
}
async fn listxattrs_into(
&self,
request: &mut requests::ListXAttr,
) -> Result<ReplyBufState, Error> {
let xattrs = self.listxattrs(request.inode).await?;
for entry in xattrs {
match request.add_c_string(entry.name()) {
ReplyBufState::Ok => (),
ReplyBufState::Full => return Ok(ReplyBufState::Full),
}
}
Ok(ReplyBufState::Ok)
}
async fn getxattr(&self, inode: u64, xattr: &OsStr) -> Result<pxar::format::XAttr, Error> {
// TODO: pxar::Accessor could probably get a more optimized method to fetch a specific
// xattr for an entry...
let xattrs = self.listxattrs(inode).await?;
for entry in xattrs {
if entry.name().to_bytes() == xattr.as_bytes() {
return Ok(entry);
}
}
io_return!(libc::ENODATA);
}
}
#[inline]
fn to_entry(entry: &FileEntry) -> Result<EntryParam, Error> {
to_entry_param(to_inode(&entry), &entry)
}
#[inline]
fn to_inode(entry: &FileEntry) -> u64 {
if entry.is_dir() {
entry.entry_range_info().entry_range.end
} else {
entry.entry_range_info().entry_range.start | NON_DIRECTORY_INODE
}
}
fn to_entry_param(inode: u64, entry: &pxar::Entry) -> Result<EntryParam, Error> {
Ok(EntryParam::simple(inode, to_stat(inode, entry)?))
}
fn to_stat(inode: u64, entry: &pxar::Entry) -> Result<libc::stat, Error> {
let nlink = if entry.is_dir() { 2 } else { 1 };
let metadata = entry.metadata();
let mut stat: libc::stat = unsafe { mem::zeroed() };
stat.st_ino = inode;
stat.st_nlink = nlink;
stat.st_mode = u32::try_from(metadata.stat.mode)
.map_err(|err| format_err!("mode does not fit into st_mode field: {}", err))?;
stat.st_size = i64::try_from(entry.file_size().unwrap_or(0))
.map_err(|err| format_err!("size does not fit into st_size field: {}", err))?;
stat.st_uid = metadata.stat.uid;
stat.st_gid = metadata.stat.gid;
stat.st_atime = metadata.stat.mtime.secs;
stat.st_atime_nsec = metadata.stat.mtime.nanos as _;
stat.st_mtime = metadata.stat.mtime.secs;
stat.st_mtime_nsec = metadata.stat.mtime.nanos as _;
stat.st_ctime = metadata.stat.mtime.secs;
stat.st_ctime_nsec = metadata.stat.mtime.nanos as _;
Ok(stat)
}

View File

@ -1,408 +0,0 @@
use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pxar::Metadata;
use proxmox::c_result;
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;
use pbs_tools::fs;
use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
use crate::tools::{acl, xattr};
//
// utility functions
//
fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
if err.is_errno(Errno::EOPNOTSUPP) {
Ok(())
} else {
Err(err)
}
}
fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
if err.is_errno(Errno::EOPNOTSUPP) {
*not_supp = true;
Ok(())
} else {
Err(err)
}
}
fn timestamp_to_update_timespec(mtime: &pxar::format::StatxTimestamp) -> [libc::timespec; 2] {
// restore mtime
const UTIME_OMIT: i64 = (1 << 30) - 2;
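// utimensat() takes [atime, mtime]; tv_nsec == UTIME_OMIT leaves atime untouched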
[
libc::timespec {
tv_sec: 0,
tv_nsec: UTIME_OMIT,
},
libc::timespec {
tv_sec: mtime.secs,
tv_nsec: mtime.nanos as _,
},
]
}
//
// metadata application:
//
pub fn apply_at(
flags: Flags,
metadata: &Metadata,
parent: RawFd,
file_name: &CStr,
path_info: &Path,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let fd = proxmox::tools::fd::Fd::openat(
&unsafe { RawFdNum::from_raw_fd(parent) },
file_name,
OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
Mode::empty(),
)?;
apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
}
pub fn apply_initial_flags(
flags: Flags,
metadata: &Metadata,
fd: RawFd,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let entry_flags = Flags::from_bits_truncate(metadata.stat.flags);
apply_chattr(
fd,
entry_flags.to_initial_chattr(),
flags.to_initial_chattr(),
)
.or_else(on_error)?;
Ok(())
}
pub fn apply(
flags: Flags,
metadata: &Metadata,
fd: RawFd,
path_info: &Path,
on_error: &mut (dyn FnMut(Error) -> Result<(), Error> + Send),
) -> Result<(), Error> {
let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
unsafe {
// UID and GID first, as this fails if we lose access anyway.
c_result!(libc::chown(
c_proc_path.as_ptr(),
metadata.stat.uid,
metadata.stat.gid
))
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to set ownership: {}", err))
.or_else(&mut *on_error)?;
}
let mut skip_xattrs = false;
apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)
.or_else(&mut *on_error)?;
add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
apply_acls(flags, &c_proc_path, metadata, path_info)
.map_err(|err| format_err!("failed to apply acls: {}", err))
.or_else(&mut *on_error)?;
apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;
// Finally mode and time. We may lose access with mode, but changing the mode also
// affects times.
if !metadata.is_symlink() {
c_result!(unsafe {
libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
})
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to change file mode: {}", err))
.or_else(&mut *on_error)?;
}
let res = c_result!(unsafe {
libc::utimensat(
libc::AT_FDCWD,
c_proc_path.as_ptr(),
timestamp_to_update_timespec(&metadata.stat.mtime).as_ptr(),
0,
)
});
match res {
Ok(_) => (),
Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
Err(err) => {
on_error(format_err!(
"failed to restore mtime attribute on {:?}: {}",
path_info,
err
))?;
}
}
if metadata.stat.flags != 0 {
apply_flags(flags, fd, metadata.stat.flags).or_else(&mut *on_error)?;
}
Ok(())
}
fn add_fcaps(
flags: Flags,
c_proc_path: *const libc::c_char,
metadata: &Metadata,
skip_xattrs: &mut bool,
) -> Result<(), Error> {
if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
return Ok(());
}
let fcaps = match metadata.fcaps.as_ref() {
Some(fcaps) => fcaps,
None => return Ok(()),
};
c_result!(unsafe {
libc::setxattr(
c_proc_path,
xattr::xattr_name_fcaps().as_ptr(),
fcaps.data.as_ptr() as *const libc::c_void,
fcaps.data.len(),
0,
)
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, skip_xattrs))
.map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;
Ok(())
}
fn apply_xattrs(
flags: Flags,
c_proc_path: *const libc::c_char,
metadata: &Metadata,
skip_xattrs: &mut bool,
) -> Result<(), Error> {
if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
return Ok(());
}
for xattr in &metadata.xattrs {
if *skip_xattrs {
return Ok(());
}
if !xattr::is_valid_xattr_name(xattr.name()) {
eprintln!("skipping invalid xattr named {:?}", xattr.name());
continue;
}
c_result!(unsafe {
libc::setxattr(
c_proc_path,
xattr.name().as_ptr() as *const libc::c_char,
xattr.value().as_ptr() as *const libc::c_void,
xattr.value().len(),
0,
)
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
.map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
}
Ok(())
}
fn apply_acls(
flags: Flags,
c_proc_path: &CStr,
metadata: &Metadata,
path_info: &Path,
) -> Result<(), Error> {
if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
return Ok(());
}
let mut acl = acl::ACL::init(5)?;
// acl type access:
acl.add_entry_full(
acl::ACL_USER_OBJ,
None,
acl::mode_user_to_acl_permissions(metadata.stat.mode),
)?;
acl.add_entry_full(
acl::ACL_OTHER,
None,
acl::mode_other_to_acl_permissions(metadata.stat.mode),
)?;
match metadata.acl.group_obj.as_ref() {
Some(group_obj) => {
acl.add_entry_full(
acl::ACL_MASK,
None,
acl::mode_group_to_acl_permissions(metadata.stat.mode),
)?;
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
}
None => {
let mode = acl::mode_group_to_acl_permissions(metadata.stat.mode);
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;
if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
eprintln!(
"Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
path_info,
);
acl.add_entry_full(acl::ACL_MASK, None, mode)?;
}
}
}
for user in &metadata.acl.users {
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
}
for group in &metadata.acl.groups {
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
}
if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
drop(acl);
// acl type default:
if let Some(default) = metadata.acl.default.as_ref() {
let mut acl = acl::ACL::init(5)?;
acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
}
for user in &metadata.acl.default_users {
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
}
for group in &metadata.acl.default_groups {
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
}
if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
}
Ok(())
}
fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
if !flags.contains(Flags::WITH_QUOTA_PROJID) {
return Ok(());
}
let projid = match metadata.quota_project_id {
Some(projid) => projid,
None => return Ok(()),
};
let mut fsxattr = fs::FSXAttr::default();
unsafe {
fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
format_err!(
"error while getting fsxattr to restore quota project id - {}",
err
)
})?;
fsxattr.fsx_projid = projid.projid as u32;
fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
format_err!(
"error while setting fsxattr to restore quota project id - {}",
err
)
})?;
}
Ok(())
}
pub(crate) fn errno_is_unsupported(errno: Errno) -> bool {
matches!(errno, Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL)
}
fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(), Error> {
if chattr == 0 {
return Ok(());
}
let mut fattr: libc::c_long = 0;
match unsafe { fs::read_attr_fd(fd, &mut fattr) } {
Ok(_) => (),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
}
let attr = (chattr & mask) | (fattr & !mask);
if attr == fattr {
return Ok(());
}
match unsafe { fs::write_attr_fd(fd, &attr) } {
Ok(_) => Ok(()),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => Ok(()),
Err(err) => bail!("failed to set file attributes: {}", err),
}
}
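// Worked example for the attribute merge above (illustrative, not from the
// original code): with chattr = 0b0110, mask = 0b0100 and on-disk fattr =
// 0b0011, the result is (0b0110 & 0b0100) | (0b0011 & !0b0100) = 0b0111.
// Only the bits selected by `mask` are taken from `chattr`; all other bits
// keep their on-disk value.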
fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
let entry_flags = Flags::from_bits_truncate(entry_flags);
apply_chattr(fd, entry_flags.to_chattr(), flags.to_chattr())?;
let fatattr = (flags & entry_flags).to_fat_attr();
if fatattr != 0 {
match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
Ok(_) => (),
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => (),
Err(err) => bail!("failed to set file FAT attributes: {}", err),
}
}
Ok(())
}

View File

@ -1,71 +0,0 @@
//! *pxar* Implementation (proxmox file archive format)
//!
//! This code implements a slightly modified version of the *catar*
//! format used in the [casync](https://github.com/systemd/casync)
//! toolkit (we are not 100% binary compatible). It is a file archive
//! format defined by 'Lennart Poettering', specially designed for
//! efficient deduplication.
//! Every archive contains items in the following order:
//! * `ENTRY` -- containing general stat() data and related bits
//! * `USER` -- user name as text, if enabled
//! * `GROUP` -- group name as text, if enabled
//! * `XATTR` -- one extended attribute
//! * ... -- more of these when there are multiple defined
//! * `ACL_USER` -- one `USER ACL` entry
//! * ... -- more of these when there are multiple defined
//! * `ACL_GROUP` -- one `GROUP ACL` entry
//! * ... -- more of these when there are multiple defined
//! * `ACL_GROUP_OBJ` -- The `ACL_GROUP_OBJ`
//! * `ACL_DEFAULT` -- The various default ACL fields, if any are defined
//! * `ACL_DEFAULT_USER` -- one USER ACL entry
//! * ... -- more of these when multiple are defined
//! * `ACL_DEFAULT_GROUP` -- one GROUP ACL entry
//! * ... -- more of these when multiple are defined
//! * `FCAPS` -- file capability in Linux disk format
//! * `QUOTA_PROJECT_ID` -- the ext4/xfs quota project ID
//! * `PAYLOAD` -- file contents, if it is one
//! * `SYMLINK` -- symlink target, if it is one
//! * `DEVICE` -- device major/minor, if it is a block/char device
//!
//! If we are serializing a directory, then this is followed by:
//!
//! * `FILENAME` -- name of the first directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the first directory entry's metadata and contents,
//! following the exact same archive format
//! * `FILENAME` -- name of the second directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the second directory entry
//! * ...
//! * `GOODBYE` -- lookup table at the end of a list of directory entries
//!
//! The original format has no way to deal with hardlinks, so we
//! extended the format by a special `HARDLINK` tag, which can replace
//! an `ENTRY` tag. The `HARDLINK` tag contains a 64-bit offset which
//! points to the linked `ENTRY` inside the archive, followed by the
//! full path name of that `ENTRY`. A `HARDLINK` carries no further data
//! (user, group, acl, ...) because this is already defined by the
//! linked `ENTRY`.
pub(crate) mod create;
pub(crate) mod dir_stack;
pub(crate) mod extract;
pub(crate) mod metadata;
pub mod fuse;
pub(crate) mod tools;
mod flags;
pub use flags::Flags;
pub use create::{create_archive, PxarCreateOptions};
pub use extract::{
create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
PxarExtractOptions,
};
/// The format requires building sorted directory lookup tables in
/// memory, so we restrict the number of allowed entries to limit
/// maximum memory usage.
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
pub use tools::{format_multi_line_entry, format_single_line_entry};

View File

@ -1,202 +0,0 @@
//! Some common methods used within the pxar code.
use std::convert::TryFrom;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;
use pxar::{mode, Entry, EntryKind, Metadata, format::StatxTimestamp};
/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
let mode = meta.stat.get_permission_bits();
u32::try_from(mode)
.map_err(drop)
.and_then(|mode| Mode::from_bits(mode).ok_or(()))
.map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
}
/// Make sure path is relative and not '.' or '..'.
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
assert_relative_path_do(Path::new(path))
}
/// Make sure path is a single component and not '.' or '..'.
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
assert_single_path_component_do(Path::new(path))
}
fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
if !path.is_relative() {
bail!("bad absolute file name in archive: {:?}", path);
}
Ok(())
}
fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
assert_relative_path_do(path)?;
let mut components = path.components();
match components.next() {
Some(std::path::Component::Normal(_)) => (),
_ => bail!("invalid path component in archive: {:?}", path),
}
if components.next().is_some() {
bail!(
"invalid path with multiple components in archive: {:?}",
path
);
}
Ok(())
}
#[rustfmt::skip]
fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
[
if 0 != c & 4 { b'r' } else { b'-' },
if 0 != c & 2 { b'w' } else { b'-' },
match (c & 1, special) {
(0, false) => b'-',
(0, true) => special_no_x,
(_, false) => b'x',
(_, true) => special_x,
}
]
}
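// Illustrative check (a sketch, not part of the original file): the low three
// permission bits map directly to the classic `ls -l` characters, and the
// capital special form is used when the execute bit is absent.
#[test]
fn symbolic_mode_examples() {
    assert_eq!(&symbolic_mode(7, false, b's', b'S'), b"rwx");
    assert_eq!(&symbolic_mode(5, false, b's', b'S'), b"r-x");
    // special bit set, execute bit absent -> capital form
    assert_eq!(&symbolic_mode(6, true, b's', b'S'), b"rwS");
}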
fn mode_string(entry: &Entry) -> String {
// https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
    // additionally we use:
    //   - a capital 'L' as file type for hard links
    //   - a second '+' after the mode to show non-acl xattr presence
//
// Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
let meta = entry.metadata();
let mode = meta.stat.mode;
let type_char = if entry.is_hardlink() {
'L'
} else {
match mode & mode::IFMT {
mode::IFREG => '-',
mode::IFBLK => 'b',
mode::IFCHR => 'c',
mode::IFDIR => 'd',
mode::IFLNK => 'l',
mode::IFIFO => 'p',
mode::IFSOCK => 's',
_ => '?',
}
};
let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
    let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');
let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
format!(
"{}{}{}{}{}{}",
type_char,
unsafe { std::str::from_utf8_unchecked(&fmt_u) },
unsafe { std::str::from_utf8_unchecked(&fmt_g) },
unsafe { std::str::from_utf8_unchecked(&fmt_o) },
has_acls,
has_xattrs,
)
}
fn format_mtime(mtime: &StatxTimestamp) -> String {
if let Ok(s) = proxmox::tools::time::strftime_local("%Y-%m-%d %H:%M:%S", mtime.secs) {
return s;
}
format!("{}.{}", mtime.secs, mtime.nanos)
}
pub fn format_single_line_entry(entry: &Entry) -> String {
let mode_string = mode_string(entry);
let meta = entry.metadata();
let (size, link) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
_ => ("0".to_string(), String::new()),
};
format!(
"{} {:<13} {} {:>8} {:?}{}",
mode_string,
format!("{}/{}", meta.stat.uid, meta.stat.gid),
format_mtime(&meta.stat.mtime),
size,
entry.path(),
link,
)
}
pub fn format_multi_line_entry(entry: &Entry) -> String {
let mode_string = mode_string(entry);
let meta = entry.metadata();
let (size, link, type_name) = match entry.kind() {
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
EntryKind::Symlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
),
EntryKind::Hardlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
),
EntryKind::Device(dev) => (
format!("{},{}", dev.major, dev.minor),
String::new(),
if meta.stat.is_chardev() {
"characters pecial file"
} else if meta.stat.is_blockdev() {
"block special file"
} else {
"device"
},
),
EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
};
let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
Ok(name) => std::borrow::Cow::Borrowed(name),
Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
};
format!(
" File: {}{}\n \
Size: {:<13} Type: {}\n\
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
Modify: {}\n",
file_name,
link,
size,
type_name,
meta.file_mode(),
mode_string,
meta.stat.uid,
meta.stat.gid,
format_mtime(&meta.stat.mtime),
)
}

View File

@ -22,10 +22,10 @@ use pbs_datastore::manifest::{
CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, ArchiveType, BackupManifest, FileInfo, archive_type
};
use pbs_tools::sha::sha256;
use pbs_client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader};
use crate::{
backup::DataStore,
client::{BackupReader, BackupRepository, HttpClient, HttpClientOptions, RemoteChunkReader},
server::WorkerTask,
tools::ParallelHandler,
};

View File

@ -30,6 +30,8 @@ use proxmox::api::{
};
use proxmox::http_err;
use pbs_tools::compression::{DeflateEncoder, Level};
use super::auth::AuthError;
use super::environment::RestEnvironment;
use super::formatter::*;
@ -39,7 +41,7 @@ use crate::api2::types::{Authid, Userid};
use crate::auth_helpers::*;
use crate::config::cached_user_info::CachedUserInfo;
use crate::tools;
use crate::tools::compression::{CompressionMethod, DeflateEncoder, Level};
use crate::tools::compression::CompressionMethod;
use crate::tools::AsyncReaderStream;
use crate::tools::FileLogger;

View File

@ -1,334 +0,0 @@
//! Implementation of the calls to handle POSIX access control lists
// see C header file <sys/acl.h> for reference
extern crate libc;
use std::ffi::CString;
use std::marker::PhantomData;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::RawFd;
use std::path::Path;
use std::ptr;
use libc::{c_char, c_int, c_uint, c_void};
use nix::errno::Errno;
use nix::NixPath;
// from: acl/include/acl.h
pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
// acl_perm_t values
pub type ACLPerm = c_uint;
pub const ACL_READ: ACLPerm = 0x04;
pub const ACL_WRITE: ACLPerm = 0x02;
pub const ACL_EXECUTE: ACLPerm = 0x01;
// acl_tag_t values
pub type ACLTag = c_int;
pub const ACL_UNDEFINED_TAG: ACLTag = 0x00;
pub const ACL_USER_OBJ: ACLTag = 0x01;
pub const ACL_USER: ACLTag = 0x02;
pub const ACL_GROUP_OBJ: ACLTag = 0x04;
pub const ACL_GROUP: ACLTag = 0x08;
pub const ACL_MASK: ACLTag = 0x10;
pub const ACL_OTHER: ACLTag = 0x20;
// acl_type_t values
pub type ACLType = c_uint;
pub const ACL_TYPE_ACCESS: ACLType = 0x8000;
pub const ACL_TYPE_DEFAULT: ACLType = 0x4000;
// acl entry constants
pub const ACL_FIRST_ENTRY: c_int = 0;
pub const ACL_NEXT_ENTRY: c_int = 1;
// acl to extended attribute names constants
// from: acl/include/acl_ea.h
pub const ACL_EA_ACCESS: &str = "system.posix_acl_access";
pub const ACL_EA_DEFAULT: &str = "system.posix_acl_default";
pub const ACL_EA_VERSION: u32 = 0x0002;
#[link(name = "acl")]
extern "C" {
fn acl_get_file(path: *const c_char, acl_type: ACLType) -> *mut c_void;
fn acl_set_file(path: *const c_char, acl_type: ACLType, acl: *mut c_void) -> c_int;
fn acl_get_fd(fd: RawFd) -> *mut c_void;
fn acl_get_entry(acl: *const c_void, entry_id: c_int, entry: *mut *mut c_void) -> c_int;
fn acl_create_entry(acl: *mut *mut c_void, entry: *mut *mut c_void) -> c_int;
fn acl_get_tag_type(entry: *mut c_void, tag_type: *mut ACLTag) -> c_int;
fn acl_set_tag_type(entry: *mut c_void, tag_type: ACLTag) -> c_int;
fn acl_get_permset(entry: *mut c_void, permset: *mut *mut c_void) -> c_int;
fn acl_clear_perms(permset: *mut c_void) -> c_int;
fn acl_get_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
fn acl_add_perm(permset: *mut c_void, perm: ACLPerm) -> c_int;
fn acl_get_qualifier(entry: *mut c_void) -> *mut c_void;
fn acl_set_qualifier(entry: *mut c_void, qualifier: *const c_void) -> c_int;
fn acl_init(count: c_int) -> *mut c_void;
fn acl_valid(ptr: *const c_void) -> c_int;
fn acl_free(ptr: *mut c_void) -> c_int;
}
#[derive(Debug)]
pub struct ACL {
ptr: *mut c_void,
}
impl Drop for ACL {
fn drop(&mut self) {
let ret = unsafe { acl_free(self.ptr) };
if ret != 0 {
panic!("invalid pointer encountered while dropping ACL - {}", Errno::last());
}
}
}
impl ACL {
pub fn init(count: usize) -> Result<ACL, nix::errno::Errno> {
        let ptr = unsafe { acl_init(count as c_int) };
if ptr.is_null() {
return Err(Errno::last());
}
Ok(ACL { ptr })
}
pub fn get_file<P: AsRef<Path>>(path: P, acl_type: ACLType) -> Result<ACL, nix::errno::Errno> {
let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
let ptr = unsafe { acl_get_file(path_cstr.as_ptr(), acl_type) };
if ptr.is_null() {
return Err(Errno::last());
}
Ok(ACL { ptr })
}
pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
path.with_nix_path(|path| {
Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
})?
.map(drop)
}
pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
let ptr = unsafe { acl_get_fd(fd) };
if ptr.is_null() {
return Err(Errno::last());
}
Ok(ACL { ptr })
}
pub fn create_entry(&mut self) -> Result<ACLEntry, nix::errno::Errno> {
let mut ptr = ptr::null_mut() as *mut c_void;
let res = unsafe { acl_create_entry(&mut self.ptr, &mut ptr) };
if res < 0 {
return Err(Errno::last());
}
Ok(ACLEntry {
ptr,
_phantom: PhantomData,
})
}
    pub fn is_valid(&self) -> bool {
        unsafe { acl_valid(self.ptr) == 0 }
    }
pub fn entries(self) -> ACLEntriesIterator {
ACLEntriesIterator {
acl: self,
current: ACL_FIRST_ENTRY,
}
}
pub fn add_entry_full(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64)
-> Result<(), nix::errno::Errno>
{
let mut entry = self.create_entry()?;
entry.set_tag_type(tag)?;
if let Some(qualifier) = qualifier {
entry.set_qualifier(qualifier)?;
}
entry.set_permissions(permissions)?;
Ok(())
}
}
#[derive(Debug)]
pub struct ACLEntry<'a> {
ptr: *mut c_void,
_phantom: PhantomData<&'a mut ()>,
}
impl<'a> ACLEntry<'a> {
pub fn get_tag_type(&self) -> Result<ACLTag, nix::errno::Errno> {
let mut tag = ACL_UNDEFINED_TAG;
let res = unsafe { acl_get_tag_type(self.ptr, &mut tag as *mut ACLTag) };
if res < 0 {
return Err(Errno::last());
}
Ok(tag)
}
pub fn set_tag_type(&mut self, tag: ACLTag) -> Result<(), nix::errno::Errno> {
let res = unsafe { acl_set_tag_type(self.ptr, tag) };
if res < 0 {
return Err(Errno::last());
}
Ok(())
}
pub fn get_permissions(&self) -> Result<u64, nix::errno::Errno> {
let mut permissions = 0;
let mut permset = ptr::null_mut() as *mut c_void;
let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
if res < 0 {
return Err(Errno::last());
}
for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
res = unsafe { acl_get_perm(permset, perm) };
if res < 0 {
return Err(Errno::last());
}
if res == 1 {
permissions |= perm as u64;
}
}
Ok(permissions)
}
pub fn set_permissions(&mut self, permissions: u64) -> Result<u64, nix::errno::Errno> {
let mut permset = ptr::null_mut() as *mut c_void;
let mut res = unsafe { acl_get_permset(self.ptr, &mut permset) };
if res < 0 {
return Err(Errno::last());
}
res = unsafe { acl_clear_perms(permset) };
if res < 0 {
return Err(Errno::last());
}
for &perm in &[ACL_READ, ACL_WRITE, ACL_EXECUTE] {
if permissions & perm as u64 == perm as u64 {
res = unsafe { acl_add_perm(permset, perm) };
if res < 0 {
return Err(Errno::last());
}
}
}
Ok(permissions)
}
pub fn get_qualifier(&self) -> Result<u64, nix::errno::Errno> {
let qualifier = unsafe { acl_get_qualifier(self.ptr) };
if qualifier.is_null() {
return Err(Errno::last());
}
let result = unsafe { *(qualifier as *const u32) as u64 };
let ret = unsafe { acl_free(qualifier) };
if ret != 0 {
panic!("invalid pointer encountered while dropping ACL qualifier - {}", Errno::last());
}
Ok(result)
}
pub fn set_qualifier(&mut self, qualifier: u64) -> Result<(), nix::errno::Errno> {
let val = qualifier as u32;
let val_ptr: *const u32 = &val;
let res = unsafe { acl_set_qualifier(self.ptr, val_ptr as *const c_void) };
if res < 0 {
return Err(Errno::last());
}
Ok(())
}
}
#[derive(Debug)]
pub struct ACLEntriesIterator {
acl: ACL,
current: c_int,
}
impl<'a> Iterator for &'a mut ACLEntriesIterator {
type Item = ACLEntry<'a>;
fn next(&mut self) -> Option<Self::Item> {
let mut entry_ptr = ptr::null_mut();
let res = unsafe { acl_get_entry(self.acl.ptr, self.current, &mut entry_ptr) };
self.current = ACL_NEXT_ENTRY;
if res == 1 {
return Some(ACLEntry { ptr: entry_ptr, _phantom: PhantomData });
}
None
}
}
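// Hypothetical usage sketch (function name is made up for illustration): read
// the access ACL of a file and print each entry's tag and permission bits.
fn print_access_acl(path: &Path) -> Result<(), nix::errno::Errno> {
    let acl = ACL::get_file(path, ACL_TYPE_ACCESS)?;
    let mut entries = acl.entries();
    for entry in &mut entries {
        // tags are the ACL_* constants above, permissions a bitset of
        // ACL_READ / ACL_WRITE / ACL_EXECUTE
        println!(
            "tag {:#x} -> permissions {:o}",
            entry.get_tag_type()?,
            entry.get_permissions()?
        );
    }
    Ok(())
}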
/// Helper to transform `PxarEntry`s user mode to acl permissions.
pub fn mode_user_to_acl_permissions(mode: u64) -> u64 {
(mode >> 6) & 7
}
/// Helper to transform `PxarEntry`s group mode to acl permissions.
pub fn mode_group_to_acl_permissions(mode: u64) -> u64 {
(mode >> 3) & 7
}
/// Helper to transform `PxarEntry`s other mode to acl permissions.
pub fn mode_other_to_acl_permissions(mode: u64) -> u64 {
mode & 7
}
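// Worked example (illustrative): mode 0o754 splits into rwx / r-x / r-- for the
// three ACL classes handled by the helpers above.
#[test]
fn mode_to_acl_permission_examples() {
    assert_eq!(mode_user_to_acl_permissions(0o754), 0o7);
    assert_eq!(mode_group_to_acl_permissions(0o754), 0o5);
    assert_eq!(mode_other_to_acl_permissions(0o754), 0o4);
}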
/// Buffer to compose ACLs as extended attribute.
pub struct ACLXAttrBuffer {
buffer: Vec<u8>,
}
impl ACLXAttrBuffer {
/// Create a new buffer to write ACLs as extended attribute.
///
/// `version` defines the ACL_EA_VERSION found in acl/include/acl_ea.h
pub fn new(version: u32) -> Self {
let mut buffer = Vec::new();
buffer.extend_from_slice(&version.to_le_bytes());
Self { buffer }
}
/// Add ACL entry to buffer.
pub fn add_entry(&mut self, tag: ACLTag, qualifier: Option<u64>, permissions: u64) {
self.buffer.extend_from_slice(&(tag as u16).to_le_bytes());
self.buffer.extend_from_slice(&(permissions as u16).to_le_bytes());
match qualifier {
Some(qualifier) => self.buffer.extend_from_slice(&(qualifier as u32).to_le_bytes()),
None => self.buffer.extend_from_slice(&ACL_UNDEFINED_ID.to_le_bytes()),
}
}
/// Length of the buffer in bytes.
pub fn len(&self) -> usize {
self.buffer.len()
}
    /// The buffer always contains at least the version, so it is never empty.
pub const fn is_empty(&self) -> bool { false }
/// Borrow raw buffer as mut slice.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
self.buffer.as_mut_slice()
}
}
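// Hypothetical sketch (helper name is made up): compose a minimal access ACL in
// the on-disk xattr layout used for "system.posix_acl_access". The three
// mandatory entries mirror the user/group/other mode bits; only named
// USER/GROUP entries would pass a qualifier.
fn build_minimal_access_acl(mode: u64) -> ACLXAttrBuffer {
    let mut xattr = ACLXAttrBuffer::new(ACL_EA_VERSION);
    xattr.add_entry(ACL_USER_OBJ, None, mode_user_to_acl_permissions(mode));
    xattr.add_entry(ACL_GROUP_OBJ, None, mode_group_to_acl_permissions(mode));
    xattr.add_entry(ACL_OTHER, None, mode_other_to_acl_permissions(mode));
    xattr
}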

View File

@ -1,19 +1,5 @@
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::{bail, Error};
use bytes::Bytes;
use flate2::{Compress, Compression, FlushCompress};
use futures::ready;
use futures::stream::Stream;
use hyper::header;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use proxmox::io_format_err;
use proxmox::tools::byte_buffer::ByteBuffer;
const BUFFER_SIZE: usize = 8192;
/// Possible Compression Methods, order determines preference (later is preferred)
#[derive(Eq, Ord, PartialEq, PartialOrd, Debug)]
@ -51,182 +37,3 @@ impl std::str::FromStr for CompressionMethod {
}
}
}
pub enum Level {
Fastest,
Best,
Default,
Precise(u32),
}
#[derive(Eq, PartialEq)]
enum EncoderState {
Reading,
Writing,
Flushing,
Finished,
}
pub struct DeflateEncoder<T> {
inner: T,
compressor: Compress,
buffer: ByteBuffer,
input_buffer: Bytes,
state: EncoderState,
}
impl<T> DeflateEncoder<T> {
pub fn new(inner: T) -> Self {
Self::with_quality(inner, Level::Default)
}
pub fn with_quality(inner: T, level: Level) -> Self {
let level = match level {
Level::Fastest => Compression::fast(),
Level::Best => Compression::best(),
Level::Default => Compression::new(3),
Level::Precise(val) => Compression::new(val),
};
Self {
inner,
compressor: Compress::new(level, false),
buffer: ByteBuffer::with_capacity(BUFFER_SIZE),
input_buffer: Bytes::new(),
state: EncoderState::Reading,
}
}
pub fn total_in(&self) -> u64 {
self.compressor.total_in()
}
pub fn total_out(&self) -> u64 {
self.compressor.total_out()
}
pub fn into_inner(self) -> T {
self.inner
}
fn encode(
&mut self,
inbuf: &[u8],
flush: FlushCompress,
) -> Result<(usize, flate2::Status), io::Error> {
let old_in = self.compressor.total_in();
let old_out = self.compressor.total_out();
let res = self
.compressor
.compress(&inbuf[..], self.buffer.get_free_mut_slice(), flush)?;
let new_in = (self.compressor.total_in() - old_in) as usize;
let new_out = (self.compressor.total_out() - old_out) as usize;
self.buffer.add_size(new_out);
Ok((new_in, res))
}
}
impl DeflateEncoder<Vec<u8>> {
// assume small files
pub async fn compress_vec<R>(&mut self, reader: &mut R, size_hint: usize) -> Result<(), Error>
where
R: AsyncRead + Unpin,
{
let mut buffer = Vec::with_capacity(size_hint);
reader.read_to_end(&mut buffer).await?;
        self.inner.reserve(size_hint); // should be enough, since we expect smaller files
self.compressor.compress_vec(&buffer[..], &mut self.inner, FlushCompress::Finish)?;
Ok(())
}
}
impl<T: AsyncWrite + Unpin> DeflateEncoder<T> {
pub async fn compress<R>(&mut self, reader: &mut R) -> Result<(), Error>
where
R: AsyncRead + Unpin,
{
let mut buffer = ByteBuffer::with_capacity(BUFFER_SIZE);
let mut eof = false;
loop {
if !eof && !buffer.is_full() {
let read = buffer.read_from_async(reader).await?;
if read == 0 {
eof = true;
}
}
let (read, _res) = self.encode(&buffer[..], FlushCompress::None)?;
buffer.consume(read);
self.inner.write_all(&self.buffer[..]).await?;
self.buffer.clear();
if buffer.is_empty() && eof {
break;
}
}
loop {
let (_read, res) = self.encode(&[][..], FlushCompress::Finish)?;
self.inner.write_all(&self.buffer[..]).await?;
self.buffer.clear();
if res == flate2::Status::StreamEnd {
break;
}
}
Ok(())
}
}
impl<T, O> Stream for DeflateEncoder<T>
where
T: Stream<Item = Result<O, io::Error>> + Unpin,
O: Into<Bytes>
{
type Item = Result<Bytes, io::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
match this.state {
EncoderState::Reading => {
if let Some(res) = ready!(Pin::new(&mut this.inner).poll_next(cx)) {
let buf = res?;
this.input_buffer = buf.into();
this.state = EncoderState::Writing;
} else {
this.state = EncoderState::Flushing;
}
}
EncoderState::Writing => {
if this.input_buffer.is_empty() {
return Poll::Ready(Some(Err(io_format_err!("empty input during write"))));
}
let mut buf = this.input_buffer.split_off(0);
let (read, res) = this.encode(&buf[..], FlushCompress::None)?;
this.input_buffer = buf.split_off(read);
if this.input_buffer.is_empty() {
this.state = EncoderState::Reading;
}
if this.buffer.is_full() || res == flate2::Status::BufError {
let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
return Poll::Ready(Some(Ok(bytes.into())));
}
}
EncoderState::Flushing => {
let (_read, res) = this.encode(&[][..], FlushCompress::Finish)?;
if !this.buffer.is_empty() {
let bytes = this.buffer.remove_data(this.buffer.len()).to_vec();
return Poll::Ready(Some(Ok(bytes.into())));
}
if res == flate2::Status::StreamEnd {
this.state = EncoderState::Finished;
}
}
EncoderState::Finished => return Poll::Ready(None),
}
}
}
}
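// Hypothetical usage sketch (function name is made up): deflate an in-memory
// buffer via the small-file helper above. tokio implements AsyncRead for
// &[u8], so a plain slice works as the reader; `compress_vec` buffers the
// whole input, so this is only suitable for data that fits in memory.
async fn deflate_small_buffer(data: &[u8]) -> Result<Vec<u8>, Error> {
    let mut encoder = DeflateEncoder::new(Vec::new());
    let mut reader = data;
    encoder.compress_vec(&mut reader, data.len()).await?;
    Ok(encoder.into_inner())
}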

View File

@ -2,8 +2,6 @@
//!
//! This is a collection of small and useful tools.
use std::any::Any;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::fs::File;
use std::io::{self, BufRead};
use std::os::unix::io::RawFd;
@ -29,7 +27,6 @@ pub use pbs_tools::process_locker::{
ProcessLocker, ProcessLockExclusiveGuard, ProcessLockSharedGuard
};
pub mod acl;
pub mod apt;
pub mod async_io;
pub mod compression;
@ -51,8 +48,6 @@ pub mod statistics;
pub mod subscription;
pub mod systemd;
pub mod ticket;
pub mod xattr;
pub mod zip;
pub mod sgutils2;
pub mod paperkey;
@ -69,6 +64,7 @@ mod file_logger;
pub use file_logger::{FileLogger, FileLogOptions};
pub use pbs_tools::broadcast_future::{BroadcastData, BroadcastFuture};
pub use pbs_tools::ops::ControlFlow;
/// The `BufferedRead` trait provides a single function
/// `buffered_read`. It returns a reference to an internal buffer. The
@ -122,65 +118,6 @@ pub fn required_array_property<'a>(param: &'a Value, name: &str) -> Result<&'a [
}
}
pub fn complete_file_name<S>(arg: &str, _param: &HashMap<String, String, S>) -> Vec<String>
where
S: BuildHasher,
{
let mut result = vec![];
use nix::fcntl::AtFlags;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
let mut dirname = std::path::PathBuf::from(if arg.is_empty() { "./" } else { arg });
let is_dir = match nix::sys::stat::fstatat(libc::AT_FDCWD, &dirname, AtFlags::empty()) {
Ok(stat) => (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR,
Err(_) => false,
};
if !is_dir {
if let Some(parent) = dirname.parent() {
dirname = parent.to_owned();
}
}
let mut dir =
match nix::dir::Dir::openat(libc::AT_FDCWD, &dirname, OFlag::O_DIRECTORY, Mode::empty()) {
Ok(d) => d,
Err(_) => return result,
};
for item in dir.iter() {
if let Ok(entry) = item {
if let Ok(name) = entry.file_name().to_str() {
if name == "." || name == ".." {
continue;
}
let mut newpath = dirname.clone();
newpath.push(name);
if let Ok(stat) =
nix::sys::stat::fstatat(libc::AT_FDCWD, &newpath, AtFlags::empty())
{
if (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR {
newpath.push("");
if let Some(newpath) = newpath.to_str() {
result.push(newpath.to_owned());
}
continue;
}
}
if let Some(newpath) = newpath.to_str() {
result.push(newpath.to_owned());
}
}
}
}
result
}
/// Shortcut for md5 sums.
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
hash(MessageDigest::md5(), data).map_err(Error::from)
@ -373,17 +310,6 @@ pub fn setup_safe_path_env() {
}
}
pub fn strip_ascii_whitespace(line: &[u8]) -> &[u8] {
let line = match line.iter().position(|&b| !b.is_ascii_whitespace()) {
Some(n) => &line[n..],
None => return &[],
};
match line.iter().rev().position(|&b| !b.is_ascii_whitespace()) {
Some(n) => &line[..(line.len() - n)],
None => &[],
}
}
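// Illustrative behaviour (examples, not from the original file): leading and
// trailing ASCII whitespace is stripped, interior whitespace is preserved:
//   strip_ascii_whitespace(b"  foo bar\n") == b"foo bar"
//   strip_ascii_whitespace(b" \t\n")       == b""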
/// Create the base run-directory.
///
/// This exists to fixate the permissions for the run *base* directory while allowing intermediate
@ -396,14 +322,3 @@ pub fn create_run_dir() -> Result<(), Error> {
let _: bool = create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), None, Some(opts))?;
Ok(())
}
/// Modeled after the nightly `std::ops::ControlFlow`.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ControlFlow<B, C = ()> {
Continue(C),
Break(B),
}
impl<B> ControlFlow<B> {
pub const CONTINUE: ControlFlow<B, ()> = ControlFlow::Continue(());
}

View File

@ -1,240 +0,0 @@
//! Wrapper functions for the libc xattr calls
use std::ffi::CStr;
use std::os::unix::io::RawFd;
use nix::errno::Errno;
use proxmox::c_str;
use proxmox::tools::vec;
/// `"security.capability"` as a CStr to avoid typos.
///
/// This cannot be `const` until `const_cstr_unchecked` is stable.
#[inline]
pub fn xattr_name_fcaps() -> &'static CStr {
c_str!("security.capability")
}
/// `"system.posix_acl_access"` as a CStr to avoid typos.
///
/// This cannot be `const` until `const_cstr_unchecked` is stable.
#[inline]
pub fn xattr_acl_access() -> &'static CStr {
c_str!("system.posix_acl_access")
}
/// `"system.posix_acl_default"` as a CStr to avoid typos.
///
/// This cannot be `const` until `const_cstr_unchecked` is stable.
#[inline]
pub fn xattr_acl_default() -> &'static CStr {
c_str!("system.posix_acl_default")
}
/// Result of `flistxattr`, allows iterating over the attributes as a list of `&CStr`s.
///
/// Listing xattrs produces a list separated by zeroes, inherently making them available as `&CStr`
/// already, so we make use of this fact and reflect this in the interface.
pub struct ListXAttr {
data: Vec<u8>,
}
impl ListXAttr {
fn new(data: Vec<u8>) -> Self {
Self { data }
}
}
impl<'a> IntoIterator for &'a ListXAttr {
type Item = &'a CStr;
type IntoIter = ListXAttrIter<'a>;
fn into_iter(self) -> Self::IntoIter {
ListXAttrIter {
data: &self.data,
at: 0,
}
}
}
/// Iterator over the extended attribute entries in a `ListXAttr`.
pub struct ListXAttrIter<'a> {
data: &'a [u8],
at: usize,
}
impl<'a> Iterator for ListXAttrIter<'a> {
type Item = &'a CStr;
fn next(&mut self) -> Option<&'a CStr> {
let data = &self.data[self.at..];
let next = data.iter().position(|b| *b == 0)? + 1;
self.at += next;
Some(unsafe { CStr::from_bytes_with_nul_unchecked(&data[..next]) })
}
}
/// Return a list of extended attributes accessible as an iterator over items of type `&CStr`.
pub fn flistxattr(fd: RawFd) -> Result<ListXAttr, nix::errno::Errno> {
// Initial buffer size for the attribute list, if content does not fit
// it gets dynamically increased until big enough.
let mut size = 256;
let mut buffer = vec::undefined(size);
let mut bytes = unsafe {
libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
};
while bytes < 0 {
let err = Errno::last();
match err {
Errno::ERANGE => {
// Buffer was not big enough to fit the list, retry with double the size
size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
},
_ => return Err(err),
}
// Retry to read the list with new buffer
buffer.resize(size, 0);
bytes = unsafe {
libc::flistxattr(fd, buffer.as_mut_ptr() as *mut libc::c_char, buffer.len())
};
}
buffer.truncate(bytes as usize);
Ok(ListXAttr::new(buffer))
}
/// Get an extended attribute by name.
///
/// Extended attributes may not contain zeroes, which we enforce in the API by using a `&CStr`
/// type.
pub fn fgetxattr(fd: RawFd, name: &CStr) -> Result<Vec<u8>, nix::errno::Errno> {
let mut size = 256;
let mut buffer = vec::undefined(size);
let mut bytes = unsafe {
libc::fgetxattr(fd, name.as_ptr(), buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
};
while bytes < 0 {
let err = Errno::last();
match err {
Errno::ERANGE => {
// Buffer was not big enough to fit the value, retry with double the size
size = size.checked_mul(2).ok_or(Errno::ENOMEM)?;
},
_ => return Err(err),
}
buffer.resize(size, 0);
bytes = unsafe {
libc::fgetxattr(fd, name.as_ptr() as *const libc::c_char, buffer.as_mut_ptr() as *mut core::ffi::c_void, buffer.len())
};
}
buffer.resize(bytes as usize, 0);
Ok(buffer)
}
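// Hypothetical usage sketch (function name is made up): dump all extended
// attributes of an open file descriptor by combining flistxattr and fgetxattr,
// skipping names that is_valid_xattr_name (below) would reject.
fn dump_xattrs(fd: RawFd) -> Result<(), nix::errno::Errno> {
    for name in &flistxattr(fd)? {
        if !is_valid_xattr_name(name) {
            continue;
        }
        let value = fgetxattr(fd, name)?;
        println!("{:?} = {} bytes", name, value.len());
    }
    Ok(())
}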
/// Set an extended attribute on a file descriptor.
pub fn fsetxattr(fd: RawFd, name: &CStr, data: &[u8]) -> Result<(), nix::errno::Errno> {
let flags = 0 as libc::c_int;
let result = unsafe {
libc::fsetxattr(fd, name.as_ptr(), data.as_ptr() as *const libc::c_void, data.len(), flags)
};
if result < 0 {
return Err(Errno::last());
}
Ok(())
}
pub fn fsetxattr_fcaps(fd: RawFd, fcaps: &[u8]) -> Result<(), nix::errno::Errno> {
// TODO casync checks and removes capabilities if they are set
fsetxattr(fd, xattr_name_fcaps(), fcaps)
}
pub fn is_security_capability(name: &CStr) -> bool {
name.to_bytes() == xattr_name_fcaps().to_bytes()
}
pub fn is_acl(name: &CStr) -> bool {
name.to_bytes() == xattr_acl_access().to_bytes()
|| name.to_bytes() == xattr_acl_default().to_bytes()
}
/// Check if the passed name buffer starts with a valid xattr namespace prefix
/// and is within the length limit of 255 bytes
pub fn is_valid_xattr_name(c_name: &CStr) -> bool {
let name = c_name.to_bytes();
if name.is_empty() || name.len() > 255 {
return false;
}
if name.starts_with(b"user.") || name.starts_with(b"trusted.") {
return true;
}
    // Samba saves Windows ACLs there
if name == b"security.NTACL" {
return true;
}
is_security_capability(c_name)
}
#[cfg(test)]
mod tests {
use super::*;
use std::ffi::CString;
use std::fs::OpenOptions;
use std::os::unix::io::AsRawFd;
use nix::errno::Errno;
use proxmox::c_str;
#[test]
fn test_fsetxattr_fgetxattr() {
let path = "./tests/xattrs.txt";
let file = OpenOptions::new()
.write(true)
.create(true)
.open(&path)
.unwrap();
let fd = file.as_raw_fd();
let mut name = b"user.".to_vec();
for _ in 0..260 {
name.push(b'a');
}
let invalid_name = CString::new(name).unwrap();
assert!(fsetxattr(fd, c_str!("user.attribute0"), b"value0").is_ok());
assert!(fsetxattr(fd, c_str!("user.empty"), b"").is_ok());
if nix::unistd::Uid::current() != nix::unistd::ROOT {
assert_eq!(fsetxattr(fd, c_str!("trusted.attribute0"), b"value0"), Err(Errno::EPERM));
}
assert_eq!(fsetxattr(fd, c_str!("garbage.attribute0"), b"value"), Err(Errno::EOPNOTSUPP));
assert_eq!(fsetxattr(fd, &invalid_name, b"err"), Err(Errno::ERANGE));
let v0 = fgetxattr(fd, c_str!("user.attribute0")).unwrap();
let v1 = fgetxattr(fd, c_str!("user.empty")).unwrap();
assert_eq!(v0, b"value0".as_ref());
assert_eq!(v1, b"".as_ref());
assert_eq!(fgetxattr(fd, c_str!("user.attribute1")), Err(Errno::ENODATA));
std::fs::remove_file(&path).unwrap();
}
#[test]
fn test_is_valid_xattr_name() {
let too_long = CString::new(vec![b'a'; 265]).unwrap();
assert!(!is_valid_xattr_name(&too_long));
assert!(!is_valid_xattr_name(c_str!("system.attr")));
assert!(is_valid_xattr_name(c_str!("user.attr")));
assert!(is_valid_xattr_name(c_str!("trusted.attr")));
assert!(is_valid_xattr_name(super::xattr_name_fcaps()));
}
}

View File

@ -1,671 +0,0 @@
//! ZIP Helper
//!
//! Provides an interface to create a ZIP file from ZipEntries.
//! For a more detailed description of the ZIP format, see:
//! https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
use std::convert::TryInto;
use std::ffi::OsString;
use std::io;
use std::mem::size_of;
use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::SystemTime;
use anyhow::{format_err, Error, Result};
use endian_trait::Endian;
use futures::ready;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
use crc32fast::Hasher;
use proxmox::tools::time::gmtime;
use crate::tools::compression::{DeflateEncoder, Level};
const LOCAL_FH_SIG: u32 = 0x04034B50;
const LOCAL_FF_SIG: u32 = 0x08074B50;
const CENTRAL_DIRECTORY_FH_SIG: u32 = 0x02014B50;
const END_OF_CENTRAL_DIR: u32 = 0x06054B50;
const VERSION_NEEDED: u16 = 0x002d;
const VERSION_MADE_BY: u16 = 0x032d;
const ZIP64_EOCD_RECORD: u32 = 0x06064B50;
const ZIP64_EOCD_LOCATOR: u32 = 0x07064B50;
// bits for date:
// 0-4: day of the month (1-31)
// 5-8: month (1 = jan, etc.)
// 9-15: year offset from 1980
//
// bits for time:
// 0-4: second / 2
// 5-10: minute (0-59)
// 11-15: hour (0-23)
//
// see https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
fn epoch_to_dos(epoch: i64) -> (u16, u16) {
let gmtime = match gmtime(epoch) {
Ok(gmtime) => gmtime,
Err(_) => return (0, 0),
};
let seconds = (gmtime.tm_sec / 2) & 0b11111;
    let minutes = gmtime.tm_min & 0b111111;
let hours = gmtime.tm_hour & 0b11111;
let time: u16 = ((hours << 11) | (minutes << 5) | (seconds)) as u16;
let date: u16 = if gmtime.tm_year > (2108 - 1900) || gmtime.tm_year < (1980 - 1900) {
0
} else {
let day = gmtime.tm_mday & 0b11111;
let month = (gmtime.tm_mon + 1) & 0b1111;
let year = (gmtime.tm_year + 1900 - 1980) & 0b1111111;
((year << 9) | (month << 5) | (day)) as u16
};
(date, time)
}
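// Worked example (illustrative): 2020-07-15 10:30:04 UTC packs as
//   date = ((2020 - 1980) << 9) | (7 << 5) | 15 = 0x50EF
//   time = (10 << 11) | (30 << 5) | (4 / 2)     = 0x53C2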
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64Field {
field_type: u16,
field_size: u16,
uncompressed_size: u64,
compressed_size: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64FieldWithOffset {
field_type: u16,
field_size: u16,
uncompressed_size: u64,
compressed_size: u64,
offset: u64,
start_disk: u32,
}
#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileHeader {
signature: u32,
version_needed: u16,
flags: u16,
compression: u16,
time: u16,
date: u16,
crc32: u32,
compressed_size: u32,
uncompressed_size: u32,
filename_len: u16,
extra_field_len: u16,
}
#[derive(Endian)]
#[repr(C, packed)]
struct LocalFileFooter {
signature: u32,
crc32: u32,
compressed_size: u64,
uncompressed_size: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct CentralDirectoryFileHeader {
signature: u32,
version_made_by: u16,
version_needed: u16,
flags: u16,
compression: u16,
time: u16,
date: u16,
crc32: u32,
compressed_size: u32,
uncompressed_size: u32,
filename_len: u16,
extra_field_len: u16,
comment_len: u16,
start_disk: u16,
internal_flags: u16,
external_flags: u32,
offset: u32,
}
#[derive(Endian)]
#[repr(C, packed)]
struct EndOfCentralDir {
signature: u32,
disk_number: u16,
start_disk: u16,
disk_record_count: u16,
total_record_count: u16,
directory_size: u32,
directory_offset: u32,
comment_len: u16,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDRecord {
signature: u32,
field_size: u64,
version_made_by: u16,
version_needed: u16,
disk_number: u32,
disk_number_central_dir: u32,
disk_record_count: u64,
total_record_count: u64,
directory_size: u64,
directory_offset: u64,
}
#[derive(Endian)]
#[repr(C, packed)]
struct Zip64EOCDLocator {
signature: u32,
disk_number: u32,
offset: u64,
disk_count: u32,
}
async fn write_struct<E, T>(output: &mut T, data: E) -> io::Result<()>
where
T: AsyncWrite + ?Sized + Unpin,
E: Endian,
{
let data = data.to_le();
let data = unsafe {
std::slice::from_raw_parts(
&data as *const E as *const u8,
core::mem::size_of_val(&data),
)
};
output.write_all(data).await
}
/// Represents an Entry in a ZIP File
///
/// Used to add files and directories to a ZipEncoder.
pub struct ZipEntry {
filename: OsString,
mtime: i64,
mode: u16,
crc32: u32,
uncompressed_size: u64,
compressed_size: u64,
offset: u64,
is_file: bool,
}
impl ZipEntry {
/// Creates a new ZipEntry
///
    /// if is_file is false the path will contain a trailing separator,
    /// so that the zip format recognizes it as a directory
pub fn new<P: AsRef<Path>>(path: P, mtime: i64, mode: u16, is_file: bool) -> Self {
let mut relpath = PathBuf::new();
for comp in path.as_ref().components() {
if let Component::Normal(_) = comp {
relpath.push(comp);
}
}
if !is_file {
relpath.push(""); // adds trailing slash
}
Self {
filename: relpath.into(),
crc32: 0,
mtime,
mode,
uncompressed_size: 0,
compressed_size: 0,
offset: 0,
is_file,
}
}
async fn write_local_header<W>(&self, mut buf: &mut W) -> io::Result<usize>
where
W: AsyncWrite + Unpin + ?Sized,
{
let filename = self.filename.as_bytes();
let filename_len = filename.len();
let header_size = size_of::<LocalFileHeader>();
let zip_field_size = size_of::<Zip64Field>();
let size: usize = header_size + filename_len + zip_field_size;
let (date, time) = epoch_to_dos(self.mtime);
write_struct(
&mut buf,
LocalFileHeader {
signature: LOCAL_FH_SIG,
version_needed: 0x2d,
flags: 1 << 3,
compression: 0x8,
time,
date,
crc32: 0,
compressed_size: 0xFFFFFFFF,
uncompressed_size: 0xFFFFFFFF,
filename_len: filename_len as u16,
extra_field_len: zip_field_size as u16,
},
)
.await?;
buf.write_all(filename).await?;
write_struct(
&mut buf,
Zip64Field {
field_type: 0x0001,
field_size: 2 * 8,
uncompressed_size: 0,
compressed_size: 0,
},
)
.await?;
Ok(size)
}
async fn write_data_descriptor<W: AsyncWrite + Unpin + ?Sized>(
&self,
mut buf: &mut W,
) -> io::Result<usize> {
let size = size_of::<LocalFileFooter>();
write_struct(
&mut buf,
LocalFileFooter {
signature: LOCAL_FF_SIG,
crc32: self.crc32,
compressed_size: self.compressed_size,
uncompressed_size: self.uncompressed_size,
},
)
.await?;
Ok(size)
}
async fn write_central_directory_header<W: AsyncWrite + Unpin + ?Sized>(
&self,
mut buf: &mut W,
) -> io::Result<usize> {
let filename = self.filename.as_bytes();
let filename_len = filename.len();
let header_size = size_of::<CentralDirectoryFileHeader>();
let zip_field_size = size_of::<Zip64FieldWithOffset>();
let mut size: usize = header_size + filename_len;
let (date, time) = epoch_to_dos(self.mtime);
let (compressed_size, uncompressed_size, offset, need_zip64) = if self.compressed_size
>= (u32::MAX as u64)
|| self.uncompressed_size >= (u32::MAX as u64)
|| self.offset >= (u32::MAX as u64)
{
size += zip_field_size;
(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, true)
} else {
(
self.compressed_size as u32,
self.uncompressed_size as u32,
self.offset as u32,
false,
)
};
write_struct(
&mut buf,
CentralDirectoryFileHeader {
signature: CENTRAL_DIRECTORY_FH_SIG,
version_made_by: VERSION_MADE_BY,
version_needed: VERSION_NEEDED,
flags: 1 << 3,
compression: 0x8,
time,
date,
crc32: self.crc32,
compressed_size,
uncompressed_size,
filename_len: filename_len as u16,
extra_field_len: if need_zip64 { zip_field_size as u16 } else { 0 },
comment_len: 0,
start_disk: 0,
internal_flags: 0,
external_flags: (self.mode as u32) << 16 | (!self.is_file as u32) << 4,
offset,
},
)
.await?;
buf.write_all(filename).await?;
if need_zip64 {
write_struct(
&mut buf,
Zip64FieldWithOffset {
field_type: 1,
field_size: 3 * 8 + 4,
uncompressed_size: self.uncompressed_size,
compressed_size: self.compressed_size,
offset: self.offset,
start_disk: 0,
},
)
.await?;
}
Ok(size)
}
}
// Wraps an AsyncRead and computes the CRC32 of all data read through it.
struct HashWrapper<R> {
inner: R,
hasher: Hasher,
}
impl<R> HashWrapper<R> {
fn new(inner: R) -> Self {
Self {
inner,
hasher: Hasher::new(),
}
}
    // Consumes self and returns the CRC32 and the inner reader.
fn finish(self) -> (u32, R) {
let crc32 = self.hasher.finalize();
(crc32, self.inner)
}
}
impl<R> AsyncRead for HashWrapper<R>
where
R: AsyncRead + Unpin,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), io::Error>> {
let this = self.get_mut();
let old_len = buf.filled().len();
ready!(Pin::new(&mut this.inner).poll_read(cx, buf))?;
let new_len = buf.filled().len();
if new_len > old_len {
this.hasher.update(&buf.filled()[old_len..new_len]);
}
Poll::Ready(Ok(()))
}
}
/// Wraps a writer that implements AsyncWrite for creating a ZIP archive
///
/// This will create a ZIP archive on the fly with files added with
/// 'add_entry'. To finish the archive, call 'finish'.
///
/// Example:
/// ```no_run
/// use proxmox_backup::tools::zip::*;
/// use tokio::fs::File;
/// use anyhow::{Error, Result};
///
/// #[tokio::main]
/// async fn main() -> Result<(), Error> {
/// let target = File::create("foo.zip").await?;
/// let mut source = File::open("foo.txt").await?;
///
/// let mut zip = ZipEncoder::new(target);
/// zip.add_entry(ZipEntry::new(
/// "foo.txt",
/// 0,
/// 0o100755,
/// true,
/// ), Some(source)).await?;
///
/// zip.finish().await?;
///
/// Ok(())
/// }
/// ```
pub struct ZipEncoder<W>
where
W: AsyncWrite + Unpin,
{
byte_count: usize,
files: Vec<ZipEntry>,
target: Option<W>,
}
impl<W: AsyncWrite + Unpin> ZipEncoder<W> {
pub fn new(target: W) -> Self {
Self {
byte_count: 0,
files: Vec::new(),
target: Some(target),
}
}
pub async fn add_entry<R: AsyncRead + Unpin>(
&mut self,
mut entry: ZipEntry,
content: Option<R>,
) -> Result<(), Error> {
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during add entry"))?;
entry.offset = self.byte_count.try_into()?;
self.byte_count += entry.write_local_header(&mut target).await?;
if let Some(content) = content {
let mut reader = HashWrapper::new(content);
let mut enc = DeflateEncoder::with_quality(target, Level::Fastest);
enc.compress(&mut reader).await?;
let total_in = enc.total_in();
let total_out = enc.total_out();
target = enc.into_inner();
let (crc32, _reader) = reader.finish();
self.byte_count += total_out as usize;
entry.compressed_size = total_out;
entry.uncompressed_size = total_in;
entry.crc32 = crc32;
}
self.byte_count += entry.write_data_descriptor(&mut target).await?;
self.target = Some(target);
self.files.push(entry);
Ok(())
}
async fn write_eocd(
&mut self,
central_dir_size: usize,
central_dir_offset: usize,
) -> Result<(), Error> {
let entrycount = self.files.len();
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during write_eocd"))?;
let mut count = entrycount as u16;
let mut directory_size = central_dir_size as u32;
let mut directory_offset = central_dir_offset as u32;
if central_dir_size > u32::MAX as usize
|| central_dir_offset > u32::MAX as usize
|| entrycount > u16::MAX as usize
{
count = 0xFFFF;
directory_size = 0xFFFFFFFF;
directory_offset = 0xFFFFFFFF;
write_struct(
&mut target,
Zip64EOCDRecord {
signature: ZIP64_EOCD_RECORD,
field_size: 44,
version_made_by: VERSION_MADE_BY,
version_needed: VERSION_NEEDED,
disk_number: 0,
disk_number_central_dir: 0,
disk_record_count: entrycount.try_into()?,
total_record_count: entrycount.try_into()?,
directory_size: central_dir_size.try_into()?,
directory_offset: central_dir_offset.try_into()?,
},
)
.await?;
let locator_offset = central_dir_offset + central_dir_size;
write_struct(
&mut target,
Zip64EOCDLocator {
signature: ZIP64_EOCD_LOCATOR,
disk_number: 0,
offset: locator_offset.try_into()?,
disk_count: 1,
},
)
.await?;
}
write_struct(
&mut target,
EndOfCentralDir {
signature: END_OF_CENTRAL_DIR,
disk_number: 0,
start_disk: 0,
disk_record_count: count,
total_record_count: count,
directory_size,
directory_offset,
comment_len: 0,
},
)
.await?;
self.target = Some(target);
Ok(())
}
pub async fn finish(&mut self) -> Result<(), Error> {
let mut target = self
.target
.take()
.ok_or_else(|| format_err!("had no target during finish"))?;
let central_dir_offset = self.byte_count;
let mut central_dir_size = 0;
for file in &self.files {
central_dir_size += file.write_central_directory_header(&mut target).await?;
}
self.target = Some(target);
self.write_eocd(central_dir_size, central_dir_offset)
.await?;
self.target
.take()
.ok_or_else(|| format_err!("had no target for flush"))?
.flush()
.await?;
Ok(())
}
}
/// Zip a local directory and write encoded data to target. "source" has to point to a valid
/// directory; its name will be the root of the zip file - e.g.:
/// source:
/// /foo/bar
/// zip file:
/// /bar/file1
/// /bar/dir1
/// /bar/dir1/file2
/// ...
/// ...except if "source" is the root directory
pub async fn zip_directory<W>(target: W, source: &Path) -> Result<(), Error>
where
W: AsyncWrite + Unpin + Send,
{
use walkdir::WalkDir;
use std::os::unix::fs::MetadataExt;
let base_path = source.parent().unwrap_or_else(|| Path::new("/"));
let mut encoder = ZipEncoder::new(target);
for entry in WalkDir::new(&source).into_iter() {
match entry {
Ok(entry) => {
let entry_path = entry.path().to_owned();
let encoder = &mut encoder;
if let Err(err) = async move {
let entry_path_no_base = entry.path().strip_prefix(base_path)?;
let metadata = entry.metadata()?;
let mtime = match metadata.modified().unwrap_or_else(|_| SystemTime::now()).duration_since(SystemTime::UNIX_EPOCH) {
Ok(dur) => dur.as_secs() as i64,
Err(time_error) => -(time_error.duration().as_secs() as i64)
};
let mode = metadata.mode() as u16;
if entry.file_type().is_file() {
let file = tokio::fs::File::open(entry.path()).await?;
let ze = ZipEntry::new(
&entry_path_no_base,
mtime,
mode,
true,
);
encoder.add_entry(ze, Some(file)).await?;
} else if entry.file_type().is_dir() {
let ze = ZipEntry::new(
&entry_path_no_base,
mtime,
mode,
false,
);
let content: Option<tokio::fs::File> = None;
encoder.add_entry(ze, content).await?;
}
// ignore other file types
let ok: Result<(), Error> = Ok(());
ok
}
.await
{
eprintln!(
"zip: error encoding file or directory '{}': {}",
entry_path.display(),
err
);
}
}
Err(err) => {
eprintln!("zip: error reading directory entry: {}", err);
}
}
}
encoder.finish().await
}
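// Hypothetical usage sketch (paths and function name are made up): stream a
// directory into a zip file on disk. tokio::fs::File implements
// AsyncWrite + Unpin + Send, so it can serve as the target directly.
async fn zip_tmp_dir() -> Result<(), Error> {
    let target = tokio::fs::File::create("/tmp/out.zip").await?;
    zip_directory(target, Path::new("/tmp/source-dir")).await
}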