Compare commits
209 Commits
0d5ab04a90
4059285649
2e079b8bf2
4ff2c9b832
a8e2940ff3
d5d5f2174e
2311238450
2ea501ffdf
4eb4e94918
817bcda848
f6de2c7359
3f0b9c10ec
2b66abbfab
402c8861d8
3f683799a8
573bcd9a92
90779237ae
1f82f9b7b5
19b5c3c43e
fe3e65c3ea
fdaab0df4e
b957aa81bd
8ea00f6e49
4bd789b0fa
2f050cf2ed
e22f4882e7
c65bc99a41
355c055e81
c2009e5309
23f74c190e
a6f8728339
c1769a749c
facd9801cf
21302088de
8268c9d161
b91b7d9ffd
6e1f0c138f
8567c0d29c
d33d8f4e6a
5b1cfa01f1
05d18b907a
e44fe0c9f5
4cf0ced950
98425309b0
7b1e26699d
676b0fde49
60f9a6ea8f
1090fd4424
92c3fd2e22
e3efaa1972
0cf2b6441e
d6d3b353be
a67f7d0a07
c8137518fe
cbef49bf4f
0b99e5aebc
29c55e5fc4
f386f512d0
3ddb14889a
00c2327564
d79926795a
c08fac4d69
c40440092d
dc2ef2b54f
b28253d650
f28cfb322a
3bbe291c51
42d19fdf69
215968e033
eddd1a1b9c
d2ce211899
1cb46c6f65
5d88c3a1c8
07fb504943
f675c5e978
4e37d9ce67
e303077132
6ef9bb59eb
eeaa2c212b
4a3adc3de8
abdb976340
3b62116ce6
e005f953d9
1c090810f5
e181d2f6da
16021f6ab7
ba694720fc
bde8e243cf
3352ee5656
b29cbc414d
026dc1d11f
9438aca6c9
547f0c97e4
177a2de992
0686b1f4db
0727e56a06
2fd3d57490
3f851d1321
1aef491e24
d0eccae37d
a34154d900
c2cc32b4dd
46405fa35d
66af7f51bc
c72ccd4e33
902b2cc278
8ecd7c9c21
7f17f7444a
fb5a066500
d19c96d507
929a13b357
36c65ee0b0
3378fd9fe5
58c51cf3d9
5509b199fb
bb59df9134
2564b0834f
9321bbd1f5
4264e52220
6988b29bdc
98c54240e6
d30c192589
67908b47fa
ac7513e368
fbbcd85839
7a6b549270
0196b9bf5b
739a51459a
195d7c90ce
6f3146c08c
4b12879289
20b3094bcb
df528ee6fa
57e50fb906
3136792c95
3d571d5509
8e6e18b77c
4d16badf6f
a609cf210e
1498659b4e
4482f3fe11
5d85847f91
476b4acadc
cf1bd08131
ec8f042459
431cc7b185
e693818afc
3d68536fc2
26e78a2efb
5444fa940b
d4f2397d4c
fab2413741
669c137fec
fc6047fcb1
3014088684
144006fade
b9cf6ee797
cdde66d277
239e49f927
ae66873ce9
bda48e04da
ba97479848
6cad8ce4ce
34020b929e
33070956af
da84cc52f4
9825748e5e
2179359f40
9bb161c881
297e600730
ed7b3a7de2
0f358204bd
ca6124d5fa
7eacdc765b
c443f58b09
ab1092392f
1e3d9b103d
386990ba09
bc853b028f
d406de299b
dfb31de8f0
7c3aa258f8
044055062c
2b388026f8
707974fdb3
9069debcd8
fa2bdc1309
8e40aa63c1
d2522b2db6
ce8e3de401
7fa2779559
042afd6e52
ff30caeaf8
553cd12ba6
de1e1a9d95
91960d6162
4c24a48eb3
484e761dab
059b7a252e
1278aeec36
e53a4c4577
98ad58fbd2
98bb3b9016
eb80aac288
c26aad405f
f03a0e509e
4c1e8855cc
85a9a5b68c
f856e0774e
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.2.3"
+version = "0.5.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -30,15 +30,20 @@ lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
 nix = "0.16"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-proxmox = { version = "0.1.38", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.1"
+proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+proxmox-fuse = "0.1.0"
+pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
 serde = { version = "1.0", features = ["derive"] }
TODO.rst (2 lines)
@@ -30,8 +30,6 @@ Chores:

 * move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/

-* recompute PXAR_ header types from strings: avoid using numbers from casync
-
 * remove pbs-* systemd timers and services on package purge
debian/changelog (vendored, 28 lines)
@@ -1,3 +1,31 @@
+rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
+
+  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
+
+  * ui: allow to Forget (delete) backup snapshots
+
+  * pxar: deal with files changing size during archiving
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200
+
+rust-proxmox-backup (0.4.0-1) unstable; urgency=medium
+
+  * change api for incremental backups mode
+
+  * zfs disk management gui
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200
+
+rust-proxmox-backup (0.3.0-1) unstable; urgency=medium
+
+  * support incremental backups mode
+
+  * new disk management
+
+  * single file restore for container backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Jun 2020 10:12:57 +0200
+
 rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
 
   * tools/systemd/time: fix compute_next_event for weekdays
debian/postinst (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+case "$1" in
+    configure)
+        # modeled after dh_systemd_start output
+        systemctl --system daemon-reload >/dev/null || true
+        if [ -n "$2" ]; then
+            _dh_action=try-reload-or-restart
+        else
+            _dh_action=start
+        fi
+        deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+exit 0
debian/prerm (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+# modeled after dh_systemd_start output
+if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
+    deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
+fi
debian/rules (vendored, 6 lines)
@@ -37,9 +37,9 @@ override_dh_auto_install:
 	    PROXY_USER=backup \
 	    LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
 
-override_dh_installinit:
-	dh_installinit
-	dh_installinit --name proxmox-backup-proxy
+override_dh_installsystemd:
+	# note: we start/try-reload-restart services manually in postinst
+	dh_installsystemd --no-start --no-restart-after-upgrade
 
 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
@@ -2,7 +2,7 @@
 Description=Proxmox Backup API Proxy Server
 Wants=network-online.target
 After=network.target
-Requires=proxmox-backup.service
+Wants=proxmox-backup.service
 After=proxmox-backup.service
 
 [Service]
@@ -44,8 +44,8 @@ async fn run() -> Result<(), Error> {
 
     let mut bytes = 0;
     for _ in 0..100 {
-        let writer = DummyWriter { bytes: 0 };
-        let writer = client.speedtest(writer).await?;
+        let mut writer = DummyWriter { bytes: 0 };
+        client.speedtest(&mut writer).await?;
         println!("Received {} bytes", writer.bytes);
         bytes += writer.bytes;
     }
@@ -59,8 +59,7 @@ async fn run() -> Result<(), Error> {
     Ok(())
 }
 
-#[tokio::main]
-async fn main() {
+fn main() {
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("ERROR: {}", err);
     }
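The second hunk replaces the #[tokio::main] attribute with a plain synchronous main that hands the run() future to the crate's proxmox_backup::tools::runtime::main helper. A rough standalone equivalent of that pattern, using only tokio's public Runtime API (the crate's own helper is not reproduced here):

use anyhow::Error;

async fn run() -> Result<(), Error> {
    // ... the actual speed test would go here ...
    Ok(())
}

fn main() {
    // build the runtime explicitly instead of relying on the #[tokio::main] attribute
    let mut rt = tokio::runtime::Runtime::new().expect("failed to create tokio runtime");
    if let Err(err) = rt.block_on(run()) {
        eprintln!("ERROR: {}", err);
    }
}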
@@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {
 
     let backup_time = chrono::Utc::now();
 
-    let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
 
     println!("start upload speed test");
     let res = client.upload_speedtest().await?;
@@ -5,9 +5,11 @@ pub mod config;
 pub mod node;
 pub mod reader;
 mod subscription;
+pub mod status;
 pub mod types;
 pub mod version;
 pub mod pull;
+mod helpers;
 
 use proxmox::api::router::SubdirMap;
 use proxmox::api::Router;
@@ -23,6 +25,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("nodes", &NODES_ROUTER),
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
+    ("status", &status::ROUTER),
     ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
@ -1,8 +1,8 @@
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use chrono::{TimeZone, Local};
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::http::request::Parts;
|
||||
use hyper::{header, Body, Response, StatusCode};
|
||||
@ -13,17 +13,21 @@ use proxmox::api::{
|
||||
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||
|
||||
use pxar::accessor::aio::Accessor;
|
||||
use pxar::EntryKind;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::api2::node::rrd::create_value_from_rrd;
|
||||
use crate::backup::*;
|
||||
use crate::config::datastore;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools;
|
||||
use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
|
||||
use crate::config::acl::{
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_MODIFY,
|
||||
@ -42,32 +46,45 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
|
||||
|
||||
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
|
||||
|
||||
let mut path = store.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(MANIFEST_BLOB_NAME);
|
||||
|
||||
let raw_data = file_get_contents(&path)?;
|
||||
let index_size = raw_data.len() as u64;
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
|
||||
let manifest = BackupManifest::try_from(blob)?;
|
||||
let (manifest, index_size) = store.load_manifest(backup_dir)?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
for item in manifest.files() {
|
||||
result.push(BackupContent {
|
||||
filename: item.filename.clone(),
|
||||
encrypted: item.encrypted,
|
||||
size: Some(item.size),
|
||||
});
|
||||
}
|
||||
|
||||
result.push(BackupContent {
|
||||
filename: MANIFEST_BLOB_NAME.to_string(),
|
||||
encrypted: Some(false),
|
||||
size: Some(index_size),
|
||||
});
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn get_all_snapshot_files(
|
||||
store: &DataStore,
|
||||
info: &BackupInfo,
|
||||
) -> Result<Vec<BackupContent>, Error> {
|
||||
let mut files = read_backup_index(&store, &info.backup_dir)?;
|
||||
|
||||
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
|
||||
acc.insert(item.filename.clone());
|
||||
acc
|
||||
});
|
||||
|
||||
for file in &info.files {
|
||||
if file_set.contains(file) { continue; }
|
||||
files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
}
|
||||
|
||||
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
|
||||
|
||||
let mut group_hash = HashMap::new();
|
||||
@ -201,21 +218,9 @@ pub fn list_snapshot_files(
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
|
||||
|
||||
let mut files = read_backup_index(&datastore, &snapshot)?;
|
||||
|
||||
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
|
||||
|
||||
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
|
||||
acc.insert(item.filename.clone());
|
||||
acc
|
||||
});
|
||||
|
||||
for file in info.files {
|
||||
if file_set.contains(&file) { continue; }
|
||||
files.push(BackupContent { filename: file, size: None });
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
get_all_snapshot_files(&datastore, &info)
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -336,25 +341,28 @@ pub fn list_snapshots (
|
||||
if owner != username { continue; }
|
||||
}
|
||||
|
||||
let mut result_item = SnapshotListItem {
|
||||
let mut size = None;
|
||||
|
||||
let files = match get_all_snapshot_files(&datastore, &info) {
|
||||
Ok(files) => {
|
||||
size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
|
||||
files
|
||||
},
|
||||
Err(err) => {
|
||||
eprintln!("error during snapshot file listing: '{}'", err);
|
||||
info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
|
||||
},
|
||||
};
|
||||
|
||||
let result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
backup_time: info.backup_dir.backup_time().timestamp(),
|
||||
files: info.files,
|
||||
size: None,
|
||||
files,
|
||||
size,
|
||||
owner: Some(owner),
|
||||
};
|
||||
|
||||
if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
|
||||
let mut backup_size = 0;
|
||||
for item in index.iter() {
|
||||
if let Some(item_size) = item.size {
|
||||
backup_size += item_size;
|
||||
}
|
||||
}
|
||||
result_item.size = Some(backup_size);
|
||||
}
|
||||
|
||||
snapshots.push(result_item);
|
||||
}
|
||||
|
||||
@@ -382,25 +390,92 @@ pub fn status(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<StorageStatus, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
+    crate::tools::disks::disk_usage(&datastore.base_path())
+}
 
-    let base_path = datastore.base_path();
-
-    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
-
-    use nix::NixPath;
-    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
-    nix::errno::Errno::result(res)?;
-
-    let bsize = stat.f_bsize as u64;
-
-    Ok(StorageStatus {
-        total: stat.f_blocks*bsize,
-        used: (stat.f_blocks-stat.f_bfree)*bsize,
-        avail: stat.f_bavail*bsize,
-    })
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backup from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let worker_id;
+
+    let mut backup_dir = None;
+    let mut backup_group = None;
+
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            worker_id = format!("{}_{}", store, dir);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            let group = BackupGroup::new(backup_type, backup_id);
+            worker_id = format!("{}_{}", store, group);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            worker_id = store.clone();
+        }
+        _ => bail!("parameters do not spefify a backup group or snapshot"),
+    }
+
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let upid_str = WorkerTask::new_thread(
+        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
+        {
+            let success = if let Some(backup_dir) = backup_dir {
+                verify_backup_dir(&datastore, &backup_dir, &worker)?
+            } else if let Some(backup_group) = backup_group {
+                verify_backup_group(&datastore, &backup_group, &worker)?
+            } else {
+                verify_all_backups(&datastore, &worker)?
+            };
+            if !success {
+                bail!("verfication failed - please check the log for details");
+            }
+            Ok(())
+        })?;
+
+    Ok(json!(upid_str))
 }
 
 #[macro_export]
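The new verify handler dispatches on which optional selectors are present: backup-type, backup-id and backup-time together name a single snapshot, backup-type plus backup-id name a backup group, and no selector at all verifies the whole datastore. The sketch below only prints the three request bodies a client could POST to the verify path registered further down in this file; the concrete values and the use of serde_json here are illustrative assumptions, not part of the change.

use serde_json::{json, Value};

// Build the request body for the verify endpoint (parameter names taken from
// the API schema above; the example values are made up).
fn verify_params(scope: &str) -> Value {
    match scope {
        // verify a single snapshot: all three selectors present
        "snapshot" => json!({
            "backup-type": "vm",
            "backup-id": "100",
            "backup-time": 1593428400,
        }),
        // verify a whole backup group: only type and id
        "group" => json!({ "backup-type": "vm", "backup-id": "100" }),
        // verify every backup in the datastore: no selector at all
        _ => json!({}),
    }
}

fn main() {
    // a real client would POST these bodies to the datastore's verify sub-path
    println!("snapshot:  {}", verify_params("snapshot"));
    println!("group:     {}", verify_params("group"));
    println!("datastore: {}", verify_params("datastore"));
}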
@ -752,19 +827,22 @@ fn download_file(
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
|
||||
backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
|
||||
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let file = tokio::fs::File::open(path)
|
||||
let file = tokio::fs::File::open(&path)
|
||||
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
|
||||
.await?;
|
||||
|
||||
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", &path, err);
|
||||
err
|
||||
});
|
||||
let body = Body::wrap_stream(payload);
|
||||
|
||||
// fixme: set other headers ?
|
||||
@ -776,6 +854,118 @@ fn download_file(
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&download_file_decoded),
|
||||
&ObjectSchema::new(
|
||||
"Download single decoded file from backup snapshot. Only works if it's not encrypted.",
|
||||
&sorted!([
|
||||
("store", false, &DATASTORE_SCHEMA),
|
||||
("backup-type", false, &BACKUP_TYPE_SCHEMA),
|
||||
("backup-id", false, &BACKUP_ID_SCHEMA),
|
||||
("backup-time", false, &BACKUP_TIME_SCHEMA),
|
||||
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
|
||||
]),
|
||||
)
|
||||
).access(None, &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
|
||||
true)
|
||||
);
|
||||
|
||||
fn download_file_decoded(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let store = tools::required_string_param(¶m, "store")?;
|
||||
let datastore = DataStore::lookup_datastore(store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let file_name = tools::required_string_param(¶m, "file-name")?.to_owned();
|
||||
|
||||
let backup_type = tools::required_string_param(¶m, "backup-type")?;
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let files = read_backup_index(&datastore, &backup_dir)?;
|
||||
for file in files {
|
||||
if file.filename == file_name && file.encrypted == Some(true) {
|
||||
bail!("cannot decode '{}' - is encrypted", file_name);
|
||||
}
|
||||
}
|
||||
|
||||
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let extension = file_name.rsplitn(2, '.').next().unwrap();
|
||||
|
||||
let body = match extension {
|
||||
"didx" => {
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = AsyncIndexReader::new(index, chunk_reader);
|
||||
Body::wrap_stream(AsyncReaderStream::new(reader)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
}))
|
||||
},
|
||||
"fidx" => {
|
||||
let index = FixedIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = AsyncIndexReader::new(index, chunk_reader);
|
||||
Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
}))
|
||||
},
|
||||
"blob" => {
|
||||
let file = std::fs::File::open(&path)
|
||||
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
|
||||
|
||||
Body::wrap_stream(
|
||||
WrappedReaderStream::new(DataBlobReader::new(file, None)?)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
})
|
||||
)
|
||||
},
|
||||
extension => {
|
||||
bail!("cannot download '{}' files", extension);
|
||||
},
|
||||
};
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&upload_backup_log),
|
||||
@ -846,6 +1036,212 @@ fn upload_backup_log(
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
},
|
||||
"backup-time": {
|
||||
schema: BACKUP_TIME_SCHEMA,
|
||||
},
|
||||
"filepath": {
|
||||
description: "Base64 encoded path.",
|
||||
type: String,
|
||||
}
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
|
||||
},
|
||||
)]
|
||||
/// Get the entries of the given path of the catalog
|
||||
fn catalog(
|
||||
store: String,
|
||||
backup_type: String,
|
||||
backup_id: String,
|
||||
backup_time: i64,
|
||||
filepath: String,
|
||||
_param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(CATALOG_NAME);
|
||||
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
|
||||
let mut catalog_reader = CatalogReader::new(reader);
|
||||
let mut current = catalog_reader.root()?;
|
||||
let mut components = vec![];
|
||||
|
||||
|
||||
if filepath != "root" {
|
||||
components = base64::decode(filepath)?;
|
||||
if components.len() > 0 && components[0] == '/' as u8 {
|
||||
components.remove(0);
|
||||
}
|
||||
for component in components.split(|c| *c == '/' as u8) {
|
||||
if let Some(entry) = catalog_reader.lookup(¤t, component)? {
|
||||
current = entry;
|
||||
} else {
|
||||
bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = Vec::new();
|
||||
|
||||
for direntry in catalog_reader.read_dir(¤t)? {
|
||||
let mut components = components.clone();
|
||||
components.push('/' as u8);
|
||||
components.extend(&direntry.name);
|
||||
let path = base64::encode(components);
|
||||
let text = String::from_utf8_lossy(&direntry.name);
|
||||
let mut entry = json!({
|
||||
"filepath": path,
|
||||
"text": text,
|
||||
"type": CatalogEntryType::from(&direntry.attr).to_string(),
|
||||
"leaf": true,
|
||||
});
|
||||
match direntry.attr {
|
||||
DirEntryAttribute::Directory { start: _ } => {
|
||||
entry["leaf"] = false.into();
|
||||
},
|
||||
DirEntryAttribute::File { size, mtime } => {
|
||||
entry["size"] = size.into();
|
||||
entry["mtime"] = mtime.into();
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
res.push(entry);
|
||||
}
|
||||
|
||||
Ok(res.into())
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&pxar_file_download),
|
||||
&ObjectSchema::new(
|
||||
"Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
|
||||
&sorted!([
|
||||
("store", false, &DATASTORE_SCHEMA),
|
||||
("backup-type", false, &BACKUP_TYPE_SCHEMA),
|
||||
("backup-id", false, &BACKUP_ID_SCHEMA),
|
||||
("backup-time", false, &BACKUP_TIME_SCHEMA),
|
||||
("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
|
||||
]),
|
||||
)
|
||||
).access(None, &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
|
||||
true)
|
||||
);
|
||||
|
||||
fn pxar_file_download(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let store = tools::required_string_param(¶m, "store")?;
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let filepath = tools::required_string_param(¶m, "filepath")?.to_owned();
|
||||
|
||||
let backup_type = tools::required_string_param(¶m, "backup-type")?;
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
|
||||
let mut components = base64::decode(&filepath)?;
|
||||
if components.len() > 0 && components[0] == '/' as u8 {
|
||||
components.remove(0);
|
||||
}
|
||||
|
||||
let mut split = components.splitn(2, |c| *c == '/' as u8);
|
||||
let pxar_name = split.next().unwrap();
|
||||
let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
|
||||
|
||||
path.push(OsStr::from_bytes(&pxar_name));
|
||||
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader = LocalDynamicReadAt::new(reader);
|
||||
|
||||
let decoder = Accessor::new(reader, archive_size).await?;
|
||||
let root = decoder.open_root().await?;
|
||||
let file = root
|
||||
.lookup(OsStr::from_bytes(file_path)).await?
|
||||
.ok_or(format_err!("error opening '{:?}'", file_path))?;
|
||||
|
||||
let file = match file.kind() {
|
||||
EntryKind::File { .. } => file,
|
||||
EntryKind::Hardlink(_) => {
|
||||
decoder.follow_hardlink(&file).await?
|
||||
},
|
||||
// TODO symlink
|
||||
other => bail!("cannot download file of type {:?}", other),
|
||||
};
|
||||
|
||||
let body = Body::wrap_stream(
|
||||
AsyncReaderStream::new(file.contents().await?)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", filepath, err);
|
||||
err
|
||||
})
|
||||
);
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -872,10 +1268,8 @@ fn get_rrd_stats(
|
||||
_param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
|
||||
crate::rrd::extract_data(
|
||||
&rrd_dir,
|
||||
create_value_from_rrd(
|
||||
&format!("datastore/{}", store),
|
||||
&[
|
||||
"total", "used",
|
||||
"read_ios", "read_bytes",
|
||||
@ -889,11 +1283,21 @@ fn get_rrd_stats(
|
||||
|
||||
#[sortable]
|
||||
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"catalog",
|
||||
&Router::new()
|
||||
.get(&API_METHOD_CATALOG)
|
||||
),
|
||||
(
|
||||
"download",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_FILE)
|
||||
),
|
||||
(
|
||||
"download-decoded",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_FILE_DECODED)
|
||||
),
|
||||
(
|
||||
"files",
|
||||
&Router::new()
|
||||
@ -915,6 +1319,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.post(&API_METHOD_PRUNE)
|
||||
),
|
||||
(
|
||||
"pxar-file-download",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_PXAR_FILE_DOWNLOAD)
|
||||
),
|
||||
(
|
||||
"rrd",
|
||||
&Router::new()
|
||||
@ -936,6 +1345,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.upload(&API_METHOD_UPLOAD_BACKUP_LOG)
|
||||
),
|
||||
(
|
||||
"verify",
|
||||
&Router::new()
|
||||
.post(&API_METHOD_VERIFY)
|
||||
),
|
||||
];
|
||||
|
||||
const DATASTORE_INFO_ROUTER: Router = Router::new()
|
||||
|
@ -10,7 +10,7 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
|
||||
use crate::tools::{self, WrappedReaderStream};
|
||||
use crate::tools;
|
||||
use crate::server::{WorkerTask, H2Service};
|
||||
use crate::backup::*;
|
||||
use crate::api2::types::*;
|
||||
@ -199,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
),
|
||||
(
|
||||
"dynamic_index", &Router::new()
|
||||
.download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
|
||||
.post(&API_METHOD_CREATE_DYNAMIC_INDEX)
|
||||
.put(&API_METHOD_DYNAMIC_APPEND)
|
||||
),
|
||||
@ -222,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
),
|
||||
(
|
||||
"fixed_index", &Router::new()
|
||||
.download(&API_METHOD_FIXED_CHUNK_INDEX)
|
||||
.post(&API_METHOD_CREATE_FIXED_INDEX)
|
||||
.put(&API_METHOD_FIXED_APPEND)
|
||||
),
|
||||
(
|
||||
"previous", &Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
|
||||
),
|
||||
(
|
||||
"speedtest", &Router::new()
|
||||
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
|
||||
@ -284,6 +286,8 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
|
||||
.minimum(1)
|
||||
.schema()
|
||||
),
|
||||
("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
|
||||
csum and reuse index for incremental backup if it matches.").schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
@ -296,10 +300,9 @@ fn create_fixed_index(
|
||||
|
||||
let env: &BackupEnvironment = rpcenv.as_ref();
|
||||
|
||||
println!("PARAM: {:?}", param);
|
||||
|
||||
let name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
let size = tools::required_integer_param(¶m, "size")? as usize;
|
||||
let reuse_csum = param["reuse-csum"].as_str();
|
||||
|
||||
let archive_name = name.clone();
|
||||
if !archive_name.ends_with(".fidx") {
|
||||
@ -307,12 +310,49 @@ fn create_fixed_index(
|
||||
}
|
||||
|
||||
let mut path = env.backup_dir.relative_path();
|
||||
path.push(archive_name);
|
||||
path.push(&archive_name);
|
||||
|
||||
let chunk_size = 4096*1024; // todo: ??
|
||||
|
||||
let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
|
||||
let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;
|
||||
// do incremental backup if csum is set
|
||||
let mut reader = None;
|
||||
let mut incremental = false;
|
||||
if let Some(csum) = reuse_csum {
|
||||
incremental = true;
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
bail!("cannot reuse index - no previous backup exists");
|
||||
}
|
||||
};
|
||||
|
||||
let mut last_path = last_backup.backup_dir.relative_path();
|
||||
last_path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_fixed_reader(last_path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
bail!("cannot reuse index - no previous backup exists for archive");
|
||||
}
|
||||
};
|
||||
|
||||
let (old_csum, _) = index.compute_csum();
|
||||
let old_csum = proxmox::tools::digest_to_hex(&old_csum);
|
||||
if old_csum != csum {
|
||||
bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
|
||||
csum, old_csum);
|
||||
}
|
||||
|
||||
reader = Some(index);
|
||||
}
|
||||
|
||||
let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
|
||||
|
||||
if let Some(reader) = reader {
|
||||
writer.clone_data_from(&reader)?;
|
||||
}
|
||||
|
||||
let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;
|
||||
|
||||
env.log(format!("created new fixed index {} ({:?})", wid, path));
|
||||
|
||||
@ -520,15 +560,15 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
|
||||
(
|
||||
"chunk-count",
|
||||
false,
|
||||
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
|
||||
.minimum(1)
|
||||
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
|
||||
.minimum(0)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"size",
|
||||
false,
|
||||
&IntegerSchema::new("File size. This is used to verify that the server got all data.")
|
||||
.minimum(1)
|
||||
&IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
|
||||
.minimum(0)
|
||||
.schema()
|
||||
),
|
||||
("csum", false, &StringSchema::new("Digest list checksum.").schema()),
|
||||
@ -572,20 +612,17 @@ fn finish_backup (
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&dynamic_chunk_index),
|
||||
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&download_previous),
|
||||
&ObjectSchema::new(
|
||||
r###"
|
||||
Download the dynamic chunk index from the previous backup.
|
||||
Simply returns an empty list if this is the first backup.
|
||||
"### ,
|
||||
"Download archive from previous backup.",
|
||||
&sorted!([
|
||||
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
fn dynamic_chunk_index(
|
||||
fn download_previous(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
@ -598,130 +635,38 @@ fn dynamic_chunk_index(
|
||||
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
|
||||
if !archive_name.ends_with(".didx") {
|
||||
bail!("wrong archive extension: '{}'", archive_name);
|
||||
}
|
||||
|
||||
let empty_response = {
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::empty())?
|
||||
};
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => return Ok(empty_response),
|
||||
None => bail!("no previous backup"),
|
||||
};
|
||||
|
||||
let mut path = last_backup.backup_dir.relative_path();
|
||||
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
|
||||
path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_dynamic_reader(path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
env.log(format!("there is no last backup for archive '{}'", archive_name));
|
||||
return Ok(empty_response);
|
||||
{
|
||||
let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
|
||||
ArchiveType::FixedIndex => {
|
||||
let index = env.datastore.open_fixed_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
ArchiveType::DynamicIndex => {
|
||||
let index = env.datastore.open_dynamic_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
_ => { None }
|
||||
};
|
||||
if let Some(index) = index {
|
||||
env.log(format!("register chunks in '{}' from previous backup.", archive_name));
|
||||
|
||||
env.log(format!("download last backup index for archive '{}'", archive_name));
|
||||
|
||||
let count = index.index_count();
|
||||
for pos in 0..count {
|
||||
let (start, end, digest) = index.chunk_info(pos)?;
|
||||
let size = (end - start) as u32;
|
||||
env.register_chunk(digest, size)?;
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.range.end - info.range.start;
|
||||
env.register_chunk(info.digest, size as u32)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let reader = DigestListEncoder::new(Box::new(index));
|
||||
|
||||
let stream = WrappedReaderStream::new(reader);
|
||||
|
||||
// fixme: set size, content type?
|
||||
let response = http::Response::builder()
|
||||
.status(200)
|
||||
.body(Body::wrap_stream(stream))?;
|
||||
|
||||
Ok(response)
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&fixed_chunk_index),
|
||||
&ObjectSchema::new(
|
||||
r###"
|
||||
Download the fixed chunk index from the previous backup.
|
||||
Simply returns an empty list if this is the first backup.
|
||||
"### ,
|
||||
&sorted!([
|
||||
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
fn fixed_chunk_index(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let env: &BackupEnvironment = rpcenv.as_ref();
|
||||
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
|
||||
if !archive_name.ends_with(".fidx") {
|
||||
bail!("wrong archive extension: '{}'", archive_name);
|
||||
}
|
||||
|
||||
let empty_response = {
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::empty())?
|
||||
};
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => return Ok(empty_response),
|
||||
};
|
||||
|
||||
let mut path = last_backup.backup_dir.relative_path();
|
||||
path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_fixed_reader(path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
env.log(format!("there is no last backup for archive '{}'", archive_name));
|
||||
return Ok(empty_response);
|
||||
}
|
||||
};
|
||||
|
||||
env.log(format!("download last backup index for archive '{}'", archive_name));
|
||||
|
||||
let count = index.index_count();
|
||||
let image_size = index.index_bytes();
|
||||
for pos in 0..count {
|
||||
let digest = index.index_digest(pos).unwrap();
|
||||
// Note: last chunk can be smaller
|
||||
let start = (pos*index.chunk_size) as u64;
|
||||
let mut end = start + index.chunk_size as u64;
|
||||
if end > image_size { end = image_size; }
|
||||
let size = (end - start) as u32;
|
||||
env.register_chunk(*digest, size)?;
|
||||
}
|
||||
|
||||
let reader = DigestListEncoder::new(Box::new(index));
|
||||
|
||||
let stream = WrappedReaderStream::new(reader);
|
||||
|
||||
// fixme: set size, content type?
|
||||
let response = http::Response::builder()
|
||||
.status(200)
|
||||
.body(Body::wrap_stream(stream))?;
|
||||
|
||||
Ok(response)
|
||||
env.log(format!("download '{}' from previous backup.", archive_name));
|
||||
crate::api2::helpers::create_download_response(path).await
|
||||
}.boxed()
|
||||
}
|
||||
|
@@ -47,6 +47,7 @@ struct FixedWriterState {
     chunk_count: u64,
     small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
     upload_stat: UploadStatistic,
+    incremental: bool,
 }
 
 struct SharedBackupState {
@@ -237,7 +238,7 @@ impl BackupEnvironment {
     }
 
     /// Store the writer with an unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
+    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
@@ -245,7 +246,7 @@ impl BackupEnvironment {
         let uid = state.next_uid();
 
         state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
+            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
         });
 
         Ok(uid)
@@ -310,7 +311,13 @@ impl BackupEnvironment {
 
         self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
 
-        let client_side_duplicates = chunk_count - upload_stat.count;
+        // account for zero chunk, which might be uploaded but never used
+        let client_side_duplicates = if chunk_count < upload_stat.count {
+            0
+        } else {
+            chunk_count - upload_stat.count
+        };
 
         let server_side_duplicates = upload_stat.duplicates;
 
         if (client_side_duplicates + server_side_duplicates) > 0 {
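The rewritten duplicate accounting guards against an integer underflow when the client uploaded more chunks than the finished index actually references (the zero-chunk case named in the new comment). A standalone illustration with made-up numbers; the guarded if/else above behaves like u64::saturating_sub:

fn main() {
    let chunk_count: u64 = 10;
    let uploaded_count: u64 = 12; // e.g. the zero chunk was uploaded but never referenced

    // a bare `chunk_count - uploaded_count` would panic on underflow in a debug build;
    // the guarded form in the hunk above is equivalent to a saturating subtraction
    let client_side_duplicates = chunk_count.saturating_sub(uploaded_count);

    assert_eq!(client_side_duplicates, 0);
    println!("client side duplicates: {}", client_side_duplicates);
}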
@@ -373,6 +380,7 @@ impl BackupEnvironment {
             bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
         }
 
+        if !data.incremental {
         let expected_count = data.index.index_length();
 
         if chunk_count != (expected_count as u64) {
@@ -382,12 +390,12 @@ impl BackupEnvironment {
         if size != (data.size as u64) {
             bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
         }
+        }
 
         let uuid = data.index.uuid;
 
         let expected_csum = data.index.close()?;
 
-        println!("server checksum {:?} client: {:?}", expected_csum, csum);
+        println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
         if csum != expected_csum {
             bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
         }
@@ -430,8 +438,6 @@ impl BackupEnvironment {
 
         state.ensure_unfinished()?;
 
-        state.finished = true;
-
         if state.dynamic_writers.len() != 0 {
             bail!("found open index writer - unable to finish backup");
         }
@@ -440,6 +446,8 @@ impl BackupEnvironment {
             bail!("backup does not contain valid files (file count == 0)");
         }
 
+        state.finished = true;
+
         Ok(())
     }
 
src/api2/helpers.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
+use std::path::PathBuf;
+use anyhow::Error;
+use futures::*;
+use hyper::{Body, Response, StatusCode, header};
+use proxmox::http_err;
+
+pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
+    let file = tokio::fs::File::open(path.clone())
+        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
+        .await?;
+
+    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
+        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+
+    let body = Body::wrap_stream(payload);
+
+    // fixme: set other headers ?
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .header(header::CONTENT_TYPE, "application/octet-stream")
+        .body(body)
+        .unwrap())
+}
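create_download_response is the shared plumbing the reworked handlers call at the end of their async blocks: open the file with tokio, frame it with BytesCodec and wrap the stream into an octet-stream response. A minimal hypothetical caller, assuming it lives inside this crate so the helper is reachable as crate::api2::helpers::create_download_response:

use std::path::PathBuf;

use anyhow::Error;
use hyper::{Body, Response};

// Hypothetical handler tail: resolve the requested file below some base
// directory and let the shared helper stream it back, instead of opening
// and framing the file inline as the old handlers did.
async fn send_archive(base: PathBuf, archive_name: &str) -> Result<Response<Body>, Error> {
    let mut path = base;
    path.push(archive_name);
    crate::api2::helpers::create_download_response(path).await
}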
@@ -9,9 +9,11 @@ mod syslog;
 mod journal;
 mod services;
 mod status;
-mod rrd;
+pub(crate) mod rrd;
+pub mod disks;
 
 pub const SUBDIRS: SubdirMap = &[
+    ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
     ("network", &network::ROUTER),
src/api2/node/disks.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
|
||||
use proxmox::api::router::{Router, SubdirMap};
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::{list_subdirs_api_method};
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
DiskUsageInfo, DiskUsageType, DiskManage, SmartData,
|
||||
get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
|
||||
};
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
|
||||
|
||||
pub mod directory;
|
||||
pub mod zfs;
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
skipsmart: {
|
||||
description: "Skip smart checks.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"usage-type": {
|
||||
type: DiskUsageType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "Local disk list.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: DiskUsageInfo,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List local disks
|
||||
pub fn list_disks(
|
||||
skipsmart: bool,
|
||||
usage_type: Option<DiskUsageType>,
|
||||
) -> Result<Vec<DiskUsageInfo>, Error> {
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for (_, info) in get_disks(None, skipsmart)? {
|
||||
if let Some(ref usage_type) = usage_type {
|
||||
if info.used == *usage_type {
|
||||
list.push(info);
|
||||
}
|
||||
} else {
|
||||
list.push(info);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
healthonly: {
|
||||
description: "If true returns only the health status.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
type: SmartData,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get SMART attributes and health of a disk.
|
||||
pub fn smart_status(
|
||||
disk: String,
|
||||
healthonly: Option<bool>,
|
||||
) -> Result<SmartData, Error> {
|
||||
|
||||
let healthonly = healthonly.unwrap_or(false);
|
||||
|
||||
let manager = DiskManage::new();
|
||||
let disk = manager.disk_by_name(&disk)?;
|
||||
get_smart_data(&disk, healthonly)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
uuid: {
|
||||
description: "UUID for the GPT table.",
|
||||
type: String,
|
||||
optional: true,
|
||||
max_length: 36,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Initialize empty Disk with GPT
|
||||
pub fn initialize_disk(
|
||||
disk: String,
|
||||
uuid: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let info = get_disk_usage_info(&disk, true)?;
|
||||
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("initialize disk {}", disk));
|
||||
|
||||
let disk_manager = DiskManage::new();
|
||||
let disk_info = disk_manager.disk_by_name(&disk)?;
|
||||
|
||||
inititialize_gpt_disk(&disk_info, uuid.as_deref())?;
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(json!(upid_str))
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
const SUBDIRS: SubdirMap = &sorted!([
|
||||
// ("lvm", &lvm::ROUTER),
|
||||
("directory", &directory::ROUTER),
|
||||
("zfs", &zfs::ROUTER),
|
||||
(
|
||||
"initgpt", &Router::new()
|
||||
.post(&API_METHOD_INITIALIZE_DISK)
|
||||
),
|
||||
(
|
||||
"list", &Router::new()
|
||||
.get(&API_METHOD_LIST_DISKS)
|
||||
),
|
||||
(
|
||||
"smart", &Router::new()
|
||||
.get(&API_METHOD_SMART_STATUS)
|
||||
),
|
||||
]);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||
.subdirs(SUBDIRS);
|
src/api2/node/disks/directory.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::json;
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
|
||||
use proxmox::api::section_config::SectionConfigData;
|
||||
use proxmox::api::router::Router;
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
DiskManage, FileSystemType, DiskUsageType,
|
||||
create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
|
||||
};
|
||||
use crate::tools::systemd::{self, types::*};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::*;
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"filesystem": {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// Datastore mount info.
|
||||
pub struct DatastoreMountInfo {
|
||||
/// The path of the mount unit.
|
||||
pub unitfile: String,
|
||||
/// The mount path.
|
||||
pub path: String,
|
||||
/// The mounted device.
|
||||
pub device: String,
|
||||
/// File system type
|
||||
pub filesystem: Option<String>,
|
||||
/// Mount options
|
||||
pub options: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
description: "List of systemd datastore mount units.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: DatastoreMountInfo,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List systemd datastore mount units.
|
||||
pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
|
||||
}
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
let basedir = "/etc/systemd/system";
|
||||
for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
|
||||
let item = item?;
|
||||
let name = item.file_name().to_string_lossy().to_string();
|
||||
|
||||
let unitfile = format!("{}/{}", basedir, name);
|
||||
let config = systemd::config::parse_systemd_mount(&unitfile)?;
|
||||
let data: SystemdMountSection = config.lookup("Mount", "Mount")?;
|
||||
|
||||
list.push(DatastoreMountInfo {
|
||||
unitfile,
|
||||
device: data.What,
|
||||
path: data.Where,
|
||||
filesystem: data.Type,
|
||||
options: data.Options,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the directory.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
filesystem: {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.".
|
||||
pub fn create_datastore_disk(
|
||||
name: String,
|
||||
disk: String,
|
||||
add_datastore: Option<bool>,
|
||||
filesystem: Option<FileSystemType>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let info = get_disk_usage_info(&disk, true)?;
|
||||
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("create datastore '{}' on disk {}", name, disk));
|
||||
|
||||
let add_datastore = add_datastore.unwrap_or(false);
|
||||
let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);
|
||||
|
||||
let manager = DiskManage::new();
|
||||
|
||||
let disk = manager.clone().disk_by_name(&disk)?;
|
||||
|
||||
let partition = create_single_linux_partition(&disk)?;
|
||||
create_file_system(&partition, filesystem)?;
|
||||
|
||||
let uuid = get_fs_uuid(&partition)?;
|
||||
let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
|
||||
|
||||
let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;
|
||||
|
||||
systemd::reload_daemon()?;
|
||||
systemd::enable_unit(&mount_unit_name)?;
|
||||
systemd::start_unit(&mount_unit_name)?;
|
||||
|
||||
if add_datastore {
|
||||
crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_DATASTORE_MOUNTS)
|
||||
.post(&API_METHOD_CREATE_DATASTORE_DISK);
|
||||
|
||||
|
||||
fn create_datastore_mount_unit(
|
||||
datastore_name: &str,
|
||||
fs_type: FileSystemType,
|
||||
what: &str,
|
||||
) -> Result<(String, String), Error> {
|
||||
|
||||
let mount_point = format!("/mnt/datastore/{}", datastore_name);
|
||||
let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
|
||||
mount_unit_name.push_str(".mount");
|
||||
|
||||
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
|
||||
|
||||
let unit = SystemdUnitSection {
|
||||
Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let install = SystemdInstallSection {
|
||||
WantedBy: Some(vec!["multi-user.target".to_string()]),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mount = SystemdMountSection {
|
||||
What: what.to_string(),
|
||||
Where: mount_point.clone(),
|
||||
Type: Some(fs_type.to_string()),
|
||||
Options: Some(String::from("defaults")),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut config = SectionConfigData::new();
|
||||
config.set_data("Unit", "Unit", unit)?;
|
||||
config.set_data("Install", "Install", install)?;
|
||||
config.set_data("Mount", "Mount", mount)?;
|
||||
|
||||
systemd::config::save_systemd_mount(&mount_unit_path, &config)?;
|
||||
|
||||
Ok((mount_unit_name, mount_point))
|
||||
}
|
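A note on the naming above: the unit file is named after the systemd-escaped mount path plus a `.mount` suffix, which is exactly what the MOUNT_NAME_REGEX in list_datastore_mounts matches later. A minimal illustrative sketch for a hypothetical datastore 'store1', assuming escape_unit mirrors `systemd-escape --path`:

// Illustrative sketch only: expected naming for a datastore called "store1",
// assuming systemd::escape_unit(path, true) behaves like `systemd-escape --path`
// (leading '/' dropped, remaining '/' turned into '-').
fn expected_unit_for_store1() -> (String, String) {
    let mount_point = "/mnt/datastore/store1".to_string();
    let mount_unit_name = "mnt-datastore-store1.mount".to_string();
    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
    (mount_unit_path, mount_point)
}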
380
src/api2/node/disks/zfs.rs
Normal file
@ -0,0 +1,380 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::{json, Value};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{
|
||||
api, Permission, RpcEnvironment, RpcEnvironmentType,
|
||||
schema::{
|
||||
Schema,
|
||||
StringSchema,
|
||||
ArraySchema,
|
||||
IntegerSchema,
|
||||
ApiStringFormat,
|
||||
parse_property_string,
|
||||
},
|
||||
};
|
||||
use proxmox::api::router::Router;
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
|
||||
DiskUsageType,
|
||||
};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::*;
|
||||
|
||||
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
|
||||
.schema();
|
||||
|
||||
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
"A list of disk names, comma separated.")
|
||||
.format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
|
||||
.schema();
|
||||
|
||||
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
|
||||
"Pool sector size exponent.")
|
||||
.minimum(9)
|
||||
.maximum(16)
|
||||
.default(12)
|
||||
.schema();
|
||||
|
||||
|
||||
#[api(
|
||||
default: "On",
|
||||
)]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS compression algorithm to use.
|
||||
pub enum ZfsCompressionType {
|
||||
/// Gnu Zip
|
||||
Gzip,
|
||||
/// LZ4
|
||||
Lz4,
|
||||
/// LZJB
|
||||
Lzjb,
|
||||
/// ZLE
|
||||
Zle,
|
||||
/// Enable compression using the default algorithm.
|
||||
On,
|
||||
/// Disable compression.
|
||||
Off,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS RAID level to use.
|
||||
pub enum ZfsRaidLevel {
|
||||
/// Single Disk
|
||||
Single,
|
||||
/// Mirror
|
||||
Mirror,
|
||||
/// Raid10
|
||||
Raid10,
|
||||
/// RaidZ
|
||||
RaidZ,
|
||||
/// RaidZ2
|
||||
RaidZ2,
|
||||
/// RaidZ3
|
||||
RaidZ3,
|
||||
}
|
||||
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// zpool list item
|
||||
pub struct ZpoolListItem {
|
||||
/// zpool name
|
||||
pub name: String,
|
||||
/// Health
|
||||
pub health: String,
|
||||
/// Total size
|
||||
pub size: u64,
|
||||
/// Used size
|
||||
pub alloc: u64,
|
||||
/// Free space
|
||||
pub free: u64,
|
||||
/// ZFS fragmentation level
|
||||
pub frag: u64,
|
||||
/// ZFS deduplication ratio
|
||||
pub dedup: f64,
|
||||
}
|
||||
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "List of zpools.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: ZpoolListItem,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List zfs pools.
|
||||
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
|
||||
|
||||
let data = zpool_list(None, false)?;
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for item in data {
|
||||
if let Some(usage) = item.usage {
|
||||
list.push(ZpoolListItem {
|
||||
name: item.name,
|
||||
health: item.health,
|
||||
size: usage.size,
|
||||
alloc: usage.alloc,
|
||||
free: usage.free,
|
||||
frag: usage.frag,
|
||||
dedup: usage.dedup,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "zpool vdev tree with status",
|
||||
properties: {
|
||||
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get zpool status details.
|
||||
pub fn zpool_details(
|
||||
name: String,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let key_value_list = zpool_status(&name)?;
|
||||
|
||||
let config = match key_value_list.iter().find(|(k, _)| k == "config") {
|
||||
Some((_, v)) => v,
|
||||
None => bail!("got zpool status without config key"),
|
||||
};
|
||||
|
||||
let vdev_list = parse_zpool_status_config_tree(config)?;
|
||||
let mut tree = vdev_list_to_tree(&vdev_list)?;
|
||||
|
||||
for (k, v) in key_value_list {
|
||||
if k != "config" {
|
||||
tree[k] = v.into();
|
||||
}
|
||||
}
|
||||
|
||||
tree["name"] = tree.as_object_mut().unwrap()
|
||||
.remove("pool")
|
||||
.unwrap_or_else(|| name.into());
|
||||
|
||||
|
||||
Ok(tree)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
devices: {
|
||||
schema: DISK_LIST_SCHEMA,
|
||||
},
|
||||
raidlevel: {
|
||||
type: ZfsRaidLevel,
|
||||
},
|
||||
ashift: {
|
||||
schema: ZFS_ASHIFT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
compression: {
|
||||
type: ZfsCompressionType,
|
||||
optional: true,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the zpool.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new ZFS pool.
|
||||
pub fn create_zpool(
|
||||
name: String,
|
||||
devices: String,
|
||||
raidlevel: ZfsRaidLevel,
|
||||
compression: Option<String>,
|
||||
ashift: Option<usize>,
|
||||
add_datastore: Option<bool>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let add_datastore = add_datastore.unwrap_or(false);
|
||||
|
||||
let ashift = ashift.unwrap_or(12);
|
||||
|
||||
let devices_text = devices.clone();
|
||||
let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
|
||||
let devices: Vec<String> = devices.as_array().unwrap().iter()
|
||||
.map(|v| v.as_str().unwrap().to_string()).collect();
|
||||
|
||||
let disk_map = crate::tools::disks::get_disks(None, true)?;
|
||||
for disk in devices.iter() {
|
||||
match disk_map.get(disk) {
|
||||
Some(info) => {
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
bail!("no such disk '{}'", disk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let min_disks = match raidlevel {
|
||||
ZfsRaidLevel::Single => 1,
|
||||
ZfsRaidLevel::Mirror => 2,
|
||||
ZfsRaidLevel::Raid10 => 4,
|
||||
ZfsRaidLevel::RaidZ => 3,
|
||||
ZfsRaidLevel::RaidZ2 => 4,
|
||||
ZfsRaidLevel::RaidZ3 => 5,
|
||||
};
|
||||
|
||||
// Sanity checks
|
||||
if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
|
||||
bail!("Raid10 needs an even number of disks.");
|
||||
}
|
||||
|
||||
if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
|
||||
bail!("Please give only one disk for single disk mode.");
|
||||
}
|
||||
|
||||
if devices.len() < min_disks {
|
||||
bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
|
||||
}
|
||||
|
||||
// check if the default path already exists and bail if it does,
|
||||
// otherwise we get an error on mounting
|
||||
let mut default_path = std::path::PathBuf::from("/");
|
||||
default_path.push(&name);
|
||||
|
||||
match std::fs::metadata(&default_path) {
|
||||
Err(_) => {}, // path does not exist
|
||||
Ok(_) => {
|
||||
bail!("path {:?} already exists", default_path);
|
||||
}
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
|
||||
|
||||
|
||||
let mut command = std::process::Command::new("zpool");
|
||||
command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);
|
||||
|
||||
match raidlevel {
|
||||
ZfsRaidLevel::Single => {
|
||||
command.arg(&devices[0]);
|
||||
}
|
||||
ZfsRaidLevel::Mirror => {
|
||||
command.arg("mirror");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::Raid10 => {
|
||||
devices.chunks(2).for_each(|pair| {
|
||||
command.arg("mirror");
|
||||
command.args(pair);
|
||||
});
|
||||
}
|
||||
ZfsRaidLevel::RaidZ => {
|
||||
command.arg("raidz");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::RaidZ2 => {
|
||||
command.arg("raidz2");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::RaidZ3 => {
|
||||
command.arg("raidz3");
|
||||
command.args(devices);
|
||||
}
|
||||
}
|
||||
|
||||
worker.log(format!("# {:?}", command));
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
worker.log(output);
|
||||
|
||||
if let Some(compression) = compression {
|
||||
let mut command = std::process::Command::new("zfs");
|
||||
command.args(&["set", &format!("compression={}", compression), &name]);
|
||||
worker.log(format!("# {:?}", command));
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
worker.log(output);
|
||||
}
|
||||
|
||||
if add_datastore {
|
||||
let mount_point = format!("/{}", name);
|
||||
crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
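To make the command assembly in create_zpool concrete: for a hypothetical RaidZ pool named 'tank' on three disks with the default ashift of 12, the worker effectively builds the invocation sketched here (device names are invented):

// Illustrative sketch of the zpool invocation built above for ZfsRaidLevel::RaidZ,
// name = "tank", ashift = 12, devices = ["sda", "sdb", "sdc"].
// Effectively runs: zpool create -o ashift=12 tank raidz sda sdb sdc
fn example_zpool_command() -> std::process::Command {
    let mut command = std::process::Command::new("zpool");
    command.args(&["create", "-o", "ashift=12", "tank"]);
    command.arg("raidz");
    command.args(&["sda", "sdb", "sdc"]);
    command
}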
||||
|
||||
pub const POOL_ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_ZPOOL_DETAILS);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_ZPOOLS)
|
||||
.post(&API_METHOD_CREATE_ZPOOL)
|
||||
.match_all("name", &POOL_ROUTER);
|
@ -94,7 +94,7 @@ fn get_journal(
|
||||
|
||||
let mut lines: Vec<String> = vec![];
|
||||
|
||||
let mut child = Command::new("/usr/bin/mini-journalreader")
|
||||
let mut child = Command::new("mini-journalreader")
|
||||
.args(&args)
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
@ -1,9 +1,47 @@
|
||||
use anyhow::Error;
|
||||
use serde_json::Value;
|
||||
use serde_json::{Value, json};
|
||||
|
||||
use proxmox::api::{api, Router};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
|
||||
|
||||
pub fn create_value_from_rrd(
|
||||
basedir: &str,
|
||||
list: &[&str],
|
||||
timeframe: RRDTimeFrameResolution,
|
||||
cf: RRDMode,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let mut result = Vec::new();
|
||||
let now = epoch_now_f64()?;
|
||||
|
||||
for name in list {
|
||||
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
|
||||
Some(result) => result,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let mut t = start;
|
||||
for index in 0..RRD_DATA_ENTRIES {
|
||||
if result.len() <= index {
|
||||
if let Some(value) = list[index] {
|
||||
result.push(json!({ "time": t, *name: value }));
|
||||
} else {
|
||||
result.push(json!({ "time": t }));
|
||||
}
|
||||
} else {
|
||||
if let Some(value) = list[index] {
|
||||
result[index][name] = value.into();
|
||||
}
|
||||
}
|
||||
t += reso;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.into())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -27,7 +65,7 @@ fn get_node_stats(
|
||||
_param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
crate::rrd::extract_data(
|
||||
create_value_from_rrd(
|
||||
"host",
|
||||
&[
|
||||
"cpu", "iowait",
|
||||
|
@ -38,7 +38,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
|
||||
|
||||
let real_service_name = real_service_name(service);
|
||||
|
||||
let mut child = Command::new("/bin/systemctl")
|
||||
let mut child = Command::new("systemctl")
|
||||
.args(&["show", real_service_name])
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
|
||||
|
||||
let real_service_name = real_service_name(service);
|
||||
|
||||
let status = Command::new("/bin/systemctl")
|
||||
let status = Command::new("systemctl")
|
||||
.args(&[cmd, real_service_name])
|
||||
.status()?;
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
use std::process::Command;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{Error, format_err, bail};
|
||||
use serde_json::{json, Value};
|
||||
@ -60,6 +61,7 @@ fn get_usage(
|
||||
|
||||
let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
|
||||
let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
|
||||
let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;
|
||||
|
||||
Ok(json!({
|
||||
"memory": {
|
||||
@ -68,6 +70,11 @@ fn get_usage(
|
||||
"free": meminfo.memfree,
|
||||
},
|
||||
"cpu": kstat.cpu,
|
||||
"root": {
|
||||
"total": disk_usage.total,
|
||||
"used": disk_usage.used,
|
||||
"free": disk_usage.avail,
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
@ -95,7 +102,7 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
|
||||
NodePowerCommand::Shutdown => "poweroff",
|
||||
};
|
||||
|
||||
let output = Command::new("/bin/systemctl")
|
||||
let output = Command::new("systemctl")
|
||||
.arg(systemctl_command)
|
||||
.output()
|
||||
.map_err(|err| format_err!("failed to execute systemctl - {}", err))?;
|
||||
|
@ -27,7 +27,7 @@ fn dump_journal(
|
||||
let start = start.unwrap_or(0);
|
||||
let mut count: u64 = 0;
|
||||
|
||||
let mut child = Command::new("/bin/journalctl")
|
||||
let mut child = Command::new("journalctl")
|
||||
.args(&args)
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
@ -323,21 +323,9 @@ pub fn list_tasks(
|
||||
|
||||
let mut count = 0;
|
||||
|
||||
for info in list.iter() {
|
||||
for info in list {
|
||||
if !list_all && info.upid.username != username { continue; }
|
||||
|
||||
let mut entry = TaskListItem {
|
||||
upid: info.upid_str.clone(),
|
||||
node: "localhost".to_string(),
|
||||
pid: info.upid.pid as i64,
|
||||
pstart: info.upid.pstart,
|
||||
starttime: info.upid.starttime,
|
||||
worker_type: info.upid.worker_type.clone(),
|
||||
worker_id: info.upid.worker_id.clone(),
|
||||
user: info.upid.username.clone(),
|
||||
endtime: None,
|
||||
status: None,
|
||||
};
|
||||
|
||||
if let Some(username) = userfilter {
|
||||
if !info.upid.username.contains(username) { continue; }
|
||||
@ -367,9 +355,6 @@ pub fn list_tasks(
|
||||
if errors && state.1 == "OK" {
|
||||
continue;
|
||||
}
|
||||
|
||||
entry.endtime = Some(state.0);
|
||||
entry.status = Some(state.1.clone());
|
||||
}
|
||||
|
||||
if (count as u64) < start {
|
||||
@ -379,7 +364,7 @@ pub fn list_tasks(
|
||||
count += 1;
|
||||
}
|
||||
|
||||
if (result.len() as u64) < limit { result.push(entry); };
|
||||
if (result.len() as u64) < limit { result.push(info.into()); };
|
||||
}
|
||||
|
||||
rpcenv["total"] = Value::from(count);
|
||||
|
@ -17,6 +17,7 @@ use crate::server::{WorkerTask, H2Service};
|
||||
use crate::tools;
|
||||
use crate::config::acl::PRIV_DATASTORE_READ;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::helpers;
|
||||
|
||||
mod environment;
|
||||
use environment::*;
|
||||
@ -187,26 +188,9 @@ fn download_file(
|
||||
path.push(env.backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let path2 = path.clone();
|
||||
let path3 = path.clone();
|
||||
env.log(format!("download {:?}", path.clone()));
|
||||
|
||||
let file = tokio::fs::File::open(path)
|
||||
.map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
|
||||
.await?;
|
||||
|
||||
env.log(format!("download {:?}", path3));
|
||||
|
||||
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
|
||||
|
||||
let body = Body::wrap_stream(payload);
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
helpers::create_download_response(path).await
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
|
226
src/api2/status.rs
Normal file
@ -0,0 +1,226 @@
|
||||
use proxmox::list_subdirs_api_method;
|
||||
|
||||
use anyhow::{Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{
|
||||
api,
|
||||
ApiMethod,
|
||||
Permission,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
SubdirMap,
|
||||
UserInformation,
|
||||
};
|
||||
|
||||
use crate::api2::types::{
|
||||
DATASTORE_SCHEMA,
|
||||
RRDMode,
|
||||
RRDTimeFrameResolution,
|
||||
TaskListItem
|
||||
};
|
||||
|
||||
use crate::server;
|
||||
use crate::backup::{DataStore};
|
||||
use crate::config::datastore;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::tools::statistics::{linear_regression};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::acl::{
|
||||
PRIV_SYS_AUDIT,
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
};
|
||||
|
||||
#[api(
|
||||
returns: {
|
||||
description: "Lists the Status of the Datastores.",
|
||||
type: Array,
|
||||
items: {
|
||||
description: "Status of a Datastore",
|
||||
type: Object,
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
total: {
|
||||
type: Integer,
|
||||
description: "The Size of the underlying storage in bytes",
|
||||
},
|
||||
used: {
|
||||
type: Integer,
|
||||
description: "The used bytes of the underlying storage",
|
||||
},
|
||||
avail: {
|
||||
type: Integer,
|
||||
description: "The available bytes of the underlying storage",
|
||||
},
|
||||
history: {
|
||||
type: Array,
|
||||
description: "A list of usages of the past (last Month).",
|
||||
items: {
|
||||
type: Number,
|
||||
description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
|
||||
}
|
||||
},
|
||||
"estimated-full-date": {
|
||||
type: Integer,
|
||||
optional: true,
|
||||
description: "Estimation of the UNIX epoch when the storage will be full.\
|
||||
This is calculated via a simple Linear Regression (Least Squares) \
|
||||
of RRD data of the last Month. Missing if there are not enough data points yet. \
|
||||
If the estimate lies in the past, the usage is decreasing.",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// List Datastore usages and estimates
|
||||
fn datastore_status(
|
||||
_param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = datastore::config()?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for (store, (_, _)) in &config.sections {
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
|
||||
if !allowed {
|
||||
continue;
|
||||
}
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
|
||||
|
||||
let mut entry = json!({
|
||||
"store": store,
|
||||
"total": status.total,
|
||||
"used": status.used,
|
||||
"avail": status.avail,
|
||||
});
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
let now = epoch_now_f64()?;
|
||||
let rrd_resolution = RRDTimeFrameResolution::Month;
|
||||
let rrd_mode = RRDMode::Average;
|
||||
|
||||
let total_res = crate::rrd::extract_cached_data(
|
||||
&rrd_dir,
|
||||
"total",
|
||||
now,
|
||||
rrd_resolution,
|
||||
rrd_mode,
|
||||
);
|
||||
|
||||
let used_res = crate::rrd::extract_cached_data(
|
||||
&rrd_dir,
|
||||
"used",
|
||||
now,
|
||||
rrd_resolution,
|
||||
rrd_mode,
|
||||
);
|
||||
|
||||
match (total_res, used_res) {
|
||||
(Some((start, reso, total_list)), Some((_, _, used_list))) => {
|
||||
let mut usage_list: Vec<f64> = Vec::new();
|
||||
let mut time_list: Vec<u64> = Vec::new();
|
||||
let mut history = Vec::new();
|
||||
|
||||
for (idx, used) in used_list.iter().enumerate() {
|
||||
let total = if idx < total_list.len() {
|
||||
total_list[idx]
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match (total, used) {
|
||||
(Some(total), Some(used)) if total != 0.0 => {
|
||||
time_list.push(start + (idx as u64)*reso);
|
||||
let usage = used/total;
|
||||
usage_list.push(usage);
|
||||
history.push(json!(usage));
|
||||
},
|
||||
_ => {
|
||||
history.push(json!(null))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
entry["history"] = history.into();
|
||||
|
||||
// we skip the calculation for datastores with not enough data
|
||||
if usage_list.len() >= 7 {
|
||||
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
|
||||
if b != 0.0 {
|
||||
let estimate = (1.0 - a) / b;
|
||||
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
|
||||
list.push(entry);
|
||||
}
|
||||
|
||||
Ok(list.into())
|
||||
}
|
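To illustrate the "estimated-full-date" value computed above: linear_regression fits usage ≈ a + b*t over the cached RRD samples, and the handler extrapolates to the time where usage reaches 1.0. A standalone sketch of that extrapolation (the sample coefficients are invented):

// Sketch of the fill-date extrapolation: given a fit usage = a + b*t
// (t in UNIX seconds), solve 1.0 = a + b*t for t.
fn estimate_full_date(a: f64, b: f64) -> Option<u64> {
    if b == 0.0 {
        return None; // flat usage: no meaningful estimate
    }
    Some(((1.0 - a) / b).floor() as u64)
}

// e.g. estimate_full_date(-80.0, 5.0e-8) yields about 1_620_000_000,
// i.e. an epoch in mid 2021 (values chosen purely for illustration).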
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
since: {
|
||||
type: u64,
|
||||
description: "Only list tasks since this UNIX epoch.",
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "A list of tasks.",
|
||||
type: Array,
|
||||
items: { type: TaskListItem },
|
||||
},
|
||||
access: {
|
||||
description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List tasks.
|
||||
pub fn list_tasks(
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TaskListItem>, Error> {
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
|
||||
|
||||
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
|
||||
|
||||
// TODO: replace with call that gets all tasks since 'since' epoch
|
||||
let list: Vec<TaskListItem> = server::read_task_list()?
|
||||
.into_iter()
|
||||
.map(TaskListItem::from)
|
||||
.filter(|entry| list_all || entry.user == username)
|
||||
.collect();
|
||||
|
||||
Ok(list.into())
|
||||
}
|
||||
|
||||
const SUBDIRS: SubdirMap = &[
|
||||
("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
|
||||
("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
|
||||
];
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||
.subdirs(SUBDIRS);
|
@ -74,6 +74,8 @@ const_regex!{
|
||||
pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
|
||||
|
||||
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||
|
||||
pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+|nvme\d+n\d+)$";
|
||||
}
|
||||
|
||||
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
|
||||
@ -133,6 +135,8 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
|
||||
pub const CIDR_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&CIDR_REGEX);
|
||||
|
||||
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
|
||||
|
||||
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
@ -353,6 +357,11 @@ pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
|
||||
.format(&BLOCKDEVICE_NAME_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
// Complex type definitions
|
||||
|
||||
@ -419,7 +428,7 @@ pub struct SnapshotListItem {
|
||||
pub backup_id: String,
|
||||
pub backup_time: i64,
|
||||
/// List of contained archive files.
|
||||
pub files: Vec<String>,
|
||||
pub files: Vec<BackupContent>,
|
||||
/// Overall snapshot size (sum of all archive sizes).
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub size: Option<u64>,
|
||||
@ -494,6 +503,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
|
||||
/// Basic information about archive files inside a backup snapshot.
|
||||
pub struct BackupContent {
|
||||
pub filename: String,
|
||||
/// Info if file is encrypted (or empty if we do not have that info)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub encrypted: Option<bool>,
|
||||
/// Archive size (from backup manifest).
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub size: Option<u64>,
|
||||
@ -590,6 +602,27 @@ pub struct TaskListItem {
|
||||
pub status: Option<String>,
|
||||
}
|
||||
|
||||
impl From<crate::server::TaskListInfo> for TaskListItem {
|
||||
fn from(info: crate::server::TaskListInfo) -> Self {
|
||||
let (endtime, status) = info
|
||||
.state
|
||||
.map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
|
||||
|
||||
TaskListItem {
|
||||
upid: info.upid_str,
|
||||
node: "localhost".to_string(),
|
||||
pid: info.upid.pid as i64,
|
||||
pstart: info.upid.pstart,
|
||||
starttime: info.upid.starttime,
|
||||
worker_type: info.upid.worker_type,
|
||||
worker_id: info.upid.worker_id,
|
||||
user: info.upid.username,
|
||||
endtime,
|
||||
status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
|
@ -10,6 +10,8 @@ use std::path::PathBuf;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn compute_csrf_secret_digest(
|
||||
timestamp: i64,
|
||||
secret: &[u8],
|
||||
@ -29,8 +31,7 @@ pub fn assemble_csrf_prevention_token(
|
||||
username: &str,
|
||||
) -> String {
|
||||
|
||||
let epoch = std::time::SystemTime::now().duration_since(
|
||||
std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs() as i64;
|
||||
let epoch = epoch_now_u64().unwrap() as i64;
|
||||
|
||||
let digest = compute_csrf_secret_digest(epoch, secret, username);
|
||||
|
||||
@ -67,8 +68,7 @@ pub fn verify_csrf_prevention_token(
|
||||
bail!("invalid signature.");
|
||||
}
|
||||
|
||||
let now = std::time::SystemTime::now().duration_since(
|
||||
std::time::SystemTime::UNIX_EPOCH)?.as_secs() as i64;
|
||||
let now = epoch_now_u64()? as i64;
|
||||
|
||||
let age = now - ttime;
|
||||
if age < min_age {
|
||||
|
@ -198,5 +198,11 @@ pub use prune::*;
|
||||
mod datastore;
|
||||
pub use datastore::*;
|
||||
|
||||
mod verify;
|
||||
pub use verify::*;
|
||||
|
||||
mod catalog_shell;
|
||||
pub use catalog_shell::*;
|
||||
|
||||
mod async_index_reader;
|
||||
pub use async_index_reader::*;
|
||||
|
127
src/backup/async_index_reader.rs
Normal file
@ -0,0 +1,127 @@
|
||||
use std::future::Future;
|
||||
use std::task::{Poll, Context};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::Error;
|
||||
use futures::future::FutureExt;
|
||||
use futures::ready;
|
||||
use tokio::io::AsyncRead;
|
||||
|
||||
use proxmox::sys::error::io_err_other;
|
||||
use proxmox::io_format_err;
|
||||
|
||||
use super::IndexFile;
|
||||
use super::read_chunk::AsyncReadChunk;
|
||||
|
||||
enum AsyncIndexReaderState<S> {
|
||||
NoData,
|
||||
WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
|
||||
HaveData(usize),
|
||||
}
|
||||
|
||||
pub struct AsyncIndexReader<S, I: IndexFile> {
|
||||
store: Option<S>,
|
||||
index: I,
|
||||
read_buffer: Vec<u8>,
|
||||
current_chunk_idx: usize,
|
||||
current_chunk_digest: [u8; 32],
|
||||
state: AsyncIndexReaderState<S>,
|
||||
}
|
||||
|
||||
// ok because the only public interface operates on &mut Self
|
||||
unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}
|
||||
|
||||
impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
|
||||
pub fn new(index: I, store: S) -> Self {
|
||||
Self {
|
||||
store: Some(store),
|
||||
index,
|
||||
read_buffer: Vec::with_capacity(1024*1024),
|
||||
current_chunk_idx: 0,
|
||||
current_chunk_digest: [0u8; 32],
|
||||
state: AsyncIndexReaderState::NoData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
|
||||
S: AsyncReadChunk + Unpin + 'static,
|
||||
I: IndexFile + Unpin
|
||||
{
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<tokio::io::Result<usize>> {
|
||||
let this = Pin::get_mut(self);
|
||||
loop {
|
||||
match &mut this.state {
|
||||
AsyncIndexReaderState::NoData => {
|
||||
if this.current_chunk_idx >= this.index.index_count() {
|
||||
return Poll::Ready(Ok(0));
|
||||
}
|
||||
|
||||
let digest = this
|
||||
.index
|
||||
.index_digest(this.current_chunk_idx)
|
||||
.ok_or(io_format_err!("could not get digest"))?
|
||||
.clone();
|
||||
|
||||
if digest == this.current_chunk_digest {
|
||||
this.state = AsyncIndexReaderState::HaveData(0);
|
||||
continue;
|
||||
}
|
||||
|
||||
this.current_chunk_digest = digest;
|
||||
|
||||
let mut store = match this.store.take() {
|
||||
Some(store) => store,
|
||||
None => {
|
||||
return Poll::Ready(Err(io_format_err!("could not find store")));
|
||||
},
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
store.read_chunk(&digest)
|
||||
.await
|
||||
.map(move |x| (store, x))
|
||||
};
|
||||
|
||||
this.state = AsyncIndexReaderState::WaitForData(future.boxed());
|
||||
},
|
||||
AsyncIndexReaderState::WaitForData(ref mut future) => {
|
||||
match ready!(future.as_mut().poll(cx)) {
|
||||
Ok((store, mut chunk_data)) => {
|
||||
this.read_buffer.clear();
|
||||
this.read_buffer.append(&mut chunk_data);
|
||||
this.state = AsyncIndexReaderState::HaveData(0);
|
||||
this.store = Some(store);
|
||||
},
|
||||
Err(err) => {
|
||||
return Poll::Ready(Err(io_err_other(err)));
|
||||
},
|
||||
};
|
||||
},
|
||||
AsyncIndexReaderState::HaveData(offset) => {
|
||||
let offset = *offset;
|
||||
let len = this.read_buffer.len();
|
||||
let n = if len - offset < buf.len() {
|
||||
len - offset
|
||||
} else {
|
||||
buf.len()
|
||||
};
|
||||
|
||||
buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
|
||||
if offset + n == len {
|
||||
this.state = AsyncIndexReaderState::NoData;
|
||||
this.current_chunk_idx += 1;
|
||||
} else {
|
||||
this.state = AsyncIndexReaderState::HaveData(offset + n);
|
||||
}
|
||||
|
||||
return Poll::Ready(Ok(n));
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -59,17 +59,6 @@ impl BackupGroup {
|
||||
&self.backup_id
|
||||
}
|
||||
|
||||
pub fn parse(path: &str) -> Result<Self, Error> {
|
||||
|
||||
let cap = GROUP_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||
|
||||
Ok(Self {
|
||||
backup_type: cap.get(1).unwrap().as_str().to_owned(),
|
||||
backup_id: cap.get(2).unwrap().as_str().to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn group_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = PathBuf::new();
|
||||
@ -152,6 +141,31 @@ impl BackupGroup {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BackupGroup {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.backup_type();
|
||||
let id = self.backup_id();
|
||||
write!(f, "{}/{}", backup_type, id)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for BackupGroup {
|
||||
type Err = Error;
|
||||
|
||||
/// Parse a backup group path
|
||||
///
|
||||
/// This parses strings like `vm/100`.
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
let cap = GROUP_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||
|
||||
Ok(Self {
|
||||
backup_type: cap.get(1).unwrap().as_str().to_owned(),
|
||||
backup_id: cap.get(2).unwrap().as_str().to_owned(),
|
||||
})
|
||||
}
|
||||
}
|
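With Display and FromStr in place, callers round-trip group paths through the standard traits instead of the removed BackupGroup::parse; a minimal usage sketch (the path value is only an example):

use anyhow::Error;

// Hypothetical caller: parse a group path and render it back.
fn example_group_roundtrip() -> Result<(), Error> {
    let group: BackupGroup = "vm/100".parse()?;  // FromStr impl above
    assert_eq!(group.backup_type(), "vm");
    assert_eq!(group.backup_id(), "100");
    assert_eq!(group.to_string(), "vm/100");     // Display impl above
    Ok(())
}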
||||
|
||||
/// Uniquely identify a Backup (relative to data store)
|
||||
///
|
||||
/// We also call this a backup snapshot.
|
||||
@ -188,16 +202,6 @@ impl BackupDir {
|
||||
self.backup_time
|
||||
}
|
||||
|
||||
pub fn parse(path: &str) -> Result<Self, Error> {
|
||||
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
}
|
||||
|
||||
pub fn relative_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = self.group.group_path();
|
||||
@ -212,6 +216,31 @@ impl BackupDir {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for BackupDir {
|
||||
type Err = Error;
|
||||
|
||||
/// Parse a snapshot path
|
||||
///
|
||||
/// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BackupDir {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.group.backup_type();
|
||||
let id = self.group.backup_id();
|
||||
let time = Self::backup_time_to_string(self.backup_time);
|
||||
write!(f, "{}/{}/{}", backup_type, id, time)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<(BackupGroup, i64)> for BackupDir {
|
||||
fn from((group, timestamp): (BackupGroup, i64)) -> Self {
|
||||
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
|
||||
|
@ -1,23 +1,21 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use std::fmt;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::offset::{TimeZone, Local};
|
||||
|
||||
use pathpatterns::{MatchList, MatchType};
|
||||
use proxmox::tools::io::ReadExt;
|
||||
use proxmox::sys::error::io_err_other;
|
||||
|
||||
use crate::pxar::catalog::BackupCatalogWriter;
|
||||
use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
|
||||
use crate::tools::runtime::block_on;
|
||||
use crate::pxar::catalog::BackupCatalogWriter;
|
||||
|
||||
#[repr(u8)]
|
||||
#[derive(Copy,Clone,PartialEq)]
|
||||
enum CatalogEntryType {
|
||||
pub(crate) enum CatalogEntryType {
|
||||
Directory = b'd',
|
||||
File = b'f',
|
||||
Symlink = b'l',
|
||||
@ -46,6 +44,21 @@ impl TryFrom<u8> for CatalogEntryType {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&DirEntryAttribute> for CatalogEntryType {
|
||||
fn from(value: &DirEntryAttribute) -> Self {
|
||||
match value {
|
||||
DirEntryAttribute::Directory { .. } => CatalogEntryType::Directory,
|
||||
DirEntryAttribute::File { .. } => CatalogEntryType::File,
|
||||
DirEntryAttribute::Symlink => CatalogEntryType::Symlink,
|
||||
DirEntryAttribute::Hardlink => CatalogEntryType::Hardlink,
|
||||
DirEntryAttribute::BlockDevice => CatalogEntryType::BlockDevice,
|
||||
DirEntryAttribute::CharDevice => CatalogEntryType::CharDevice,
|
||||
DirEntryAttribute::Fifo => CatalogEntryType::Fifo,
|
||||
DirEntryAttribute::Socket => CatalogEntryType::Socket,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CatalogEntryType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", char::from(*self as u8))
|
||||
@ -63,7 +76,7 @@ pub struct DirEntry {
|
||||
}
|
||||
|
||||
/// Used to specify additional attributes inside DirEntry
|
||||
#[derive(Clone, PartialEq)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum DirEntryAttribute {
|
||||
Directory { start: u64 },
|
||||
File { size: u64, mtime: u64 },
|
||||
@ -106,6 +119,23 @@ impl DirEntry {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get file mode bits for this entry to be used with the `MatchList` api.
|
||||
pub fn get_file_mode(&self) -> Option<u32> {
|
||||
Some(
|
||||
match self.attr {
|
||||
DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
|
||||
DirEntryAttribute::File { .. } => pxar::mode::IFREG,
|
||||
DirEntryAttribute::Symlink => pxar::mode::IFLNK,
|
||||
DirEntryAttribute::Hardlink => return None,
|
||||
DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
|
||||
DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
|
||||
DirEntryAttribute::Fifo => pxar::mode::IFIFO,
|
||||
DirEntryAttribute::Socket => pxar::mode::IFSOCK,
|
||||
}
|
||||
as u32
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if DirEntry is a directory
|
||||
pub fn is_directory(&self) -> bool {
|
||||
match self.attr {
|
||||
@ -383,32 +413,6 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
|
||||
}
|
||||
}
|
||||
|
||||
// fixme: move to somewhere else?
|
||||
/// Implement Write to tokio mpsc channel Sender
|
||||
pub struct SenderWriter(tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>);
|
||||
|
||||
impl SenderWriter {
|
||||
pub fn new(sender: tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>) -> Self {
|
||||
Self(sender)
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for SenderWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
|
||||
block_on(async move {
|
||||
self.0
|
||||
.send(Ok(buf.to_vec()))
|
||||
.await
|
||||
.map_err(io_err_other)
|
||||
.and(Ok(buf.len()))
|
||||
})
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Read Catalog files
|
||||
pub struct CatalogReader<R> {
|
||||
reader: R,
|
||||
@ -476,7 +480,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
&mut self,
|
||||
parent: &DirEntry,
|
||||
filename: &[u8],
|
||||
) -> Result<DirEntry, Error> {
|
||||
) -> Result<Option<DirEntry>, Error> {
|
||||
|
||||
let start = match parent.attr {
|
||||
DirEntryAttribute::Directory { start } => start,
|
||||
@ -496,10 +500,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
Ok(false) // stop parsing
|
||||
})?;
|
||||
|
||||
match item {
|
||||
None => bail!("no such file"),
|
||||
Some(entry) => Ok(entry),
|
||||
}
|
||||
Ok(item)
|
||||
}
|
||||
|
||||
/// Read the raw directory info block from current reader position.
|
||||
@ -532,7 +533,10 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
self.dump_dir(&path, pos)?;
|
||||
}
|
||||
CatalogEntryType::File => {
|
||||
let dt = Local.timestamp(mtime as i64, 0);
|
||||
let dt = Local
|
||||
.timestamp_opt(mtime as i64, 0)
|
||||
.single() // chrono docs say timestamp_opt can only be None or Single!
|
||||
.unwrap_or_else(|| Local.timestamp(0, 0));
|
||||
|
||||
println!(
|
||||
"{} {:?} {} {}",
|
||||
@ -555,38 +559,30 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
/// provided callback on them.
|
||||
pub fn find(
|
||||
&mut self,
|
||||
mut entry: &mut Vec<DirEntry>,
|
||||
pattern: &[MatchPatternSlice],
|
||||
callback: &Box<fn(&[DirEntry])>,
|
||||
parent: &DirEntry,
|
||||
file_path: &mut Vec<u8>,
|
||||
match_list: &impl MatchList, //&[MatchEntry],
|
||||
callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
|
||||
) -> Result<(), Error> {
|
||||
let parent = entry.last().unwrap();
|
||||
if !parent.is_directory() {
|
||||
return Ok(())
|
||||
}
|
||||
|
||||
let file_len = file_path.len();
|
||||
for e in self.read_dir(parent)? {
|
||||
match MatchPatternSlice::match_filename_include(
|
||||
&CString::new(e.name.clone())?,
|
||||
e.is_directory(),
|
||||
pattern,
|
||||
)? {
|
||||
(MatchType::Positive, _) => {
|
||||
entry.push(e);
|
||||
callback(&entry);
|
||||
let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
|
||||
let child_pattern = vec![pattern.as_slice()];
|
||||
self.find(&mut entry, &child_pattern, callback)?;
|
||||
entry.pop();
|
||||
let is_dir = e.is_directory();
|
||||
file_path.truncate(file_len);
|
||||
if !e.name.starts_with(b"/") {
|
||||
file_path.reserve(e.name.len() + 1);
|
||||
file_path.push(b'/');
|
||||
}
|
||||
(MatchType::PartialPositive, child_pattern)
|
||||
| (MatchType::PartialNegative, child_pattern) => {
|
||||
entry.push(e);
|
||||
self.find(&mut entry, &child_pattern, callback)?;
|
||||
entry.pop();
|
||||
file_path.extend(&e.name);
|
||||
match match_list.matches(&file_path, e.get_file_mode()) {
|
||||
Some(MatchType::Exclude) => continue,
|
||||
Some(MatchType::Include) => callback(&file_path)?,
|
||||
None => (),
|
||||
}
|
||||
_ => {}
|
||||
if is_dir {
|
||||
self.find(&e, file_path, match_list, callback)?;
|
||||
}
|
||||
}
|
||||
file_path.truncate(file_len);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -429,6 +429,10 @@ impl ChunkStore {
|
||||
full_path
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
pub fn base_path(&self) -> PathBuf {
|
||||
self.base.clone()
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ impl DataBlob {
|
||||
}
|
||||
|
||||
/// Decode blob data
|
||||
pub fn decode(self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
|
||||
pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let magic = self.magic();
|
||||
|
||||
@ -311,7 +311,9 @@ impl DataBlob {
|
||||
/// Verify digest and data length for unencrypted chunks.
|
||||
///
|
||||
/// To do that, we need to decompress data first. Please note that
|
||||
/// this is not possible for encrypted chunks.
|
||||
/// this is not possible for encrypted chunks. This function simply returns Ok
|
||||
/// for encrypted chunks.
|
||||
/// Note: This does not call verify_crc
|
||||
pub fn verify_unencrypted(
|
||||
&self,
|
||||
expected_chunk_size: usize,
|
||||
@ -320,23 +322,19 @@ impl DataBlob {
|
||||
|
||||
let magic = self.magic();
|
||||
|
||||
let verify_raw_data = |data: &[u8]| {
|
||||
if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let data = self.decode(None)?;
|
||||
|
||||
if expected_chunk_size != data.len() {
|
||||
bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
|
||||
}
|
||||
let digest = openssl::sha::sha256(data);
|
||||
let digest = openssl::sha::sha256(&data);
|
||||
if &digest != expected_digest {
|
||||
bail!("detected chunk with wrong digest.");
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
|
||||
if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
let data = zstd::block::decompress(&self.raw_data[12..], 16*1024*1024)?;
|
||||
verify_raw_data(&data)?;
|
||||
} else if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
|
||||
verify_raw_data(&self.raw_data[12..])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -19,6 +19,10 @@ pub struct DataBlobReader<R: Read> {
|
||||
state: BlobReaderState<R>,
|
||||
}
|
||||
|
||||
// zstd_safe::DCtx is not sync but we are, since
|
||||
// the only public interface is on mutable reference
|
||||
unsafe impl<R: Read> Sync for DataBlobReader<R> {}
|
||||
|
||||
impl <R: Read> DataBlobReader<R> {
|
||||
|
||||
pub fn new(mut reader: R, config: Option<Arc<CryptConfig>>) -> Result<Self, Error> {
|
||||
|
@ -2,6 +2,7 @@ use std::collections::{HashSet, HashMap};
|
||||
use std::io::{self, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
@ -134,6 +135,10 @@ impl DataStore {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
self.chunk_store.name()
|
||||
}
|
||||
|
||||
pub fn base_path(&self) -> PathBuf {
|
||||
self.chunk_store.base_path()
|
||||
}
|
||||
@ -470,4 +475,28 @@ impl DataStore {
|
||||
) -> Result<(bool, u64), Error> {
|
||||
self.chunk_store.insert_chunk(chunk, digest)
|
||||
}
|
||||
|
||||
pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
|
||||
let blob = self.chunk_store.read_chunk(digest)?;
|
||||
blob.verify_crc()?;
|
||||
blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
|
||||
let mut path = self.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(filename);
|
||||
|
||||
let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
|
||||
let raw_size = raw_data.len() as u64;
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
Ok((blob, raw_size))
|
||||
}
|
||||
|
||||
pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
|
||||
let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
|
||||
let manifest = BackupManifest::try_from(blob)?;
|
||||
Ok((manifest, raw_size))
|
||||
}
|
||||
}
|
||||
|
@ -1,23 +1,28 @@
|
||||
use std::convert::TryInto;
|
||||
use std::fs::File;
|
||||
use std::io::{BufWriter, Seek, SeekFrom, Write};
|
||||
use std::io::{self, BufWriter, Seek, SeekFrom, Write};
|
||||
use std::ops::Range;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
use proxmox::tools::io::ReadExt;
|
||||
use proxmox::tools::uuid::Uuid;
|
||||
use proxmox::tools::vec;
|
||||
use proxmox::tools::mmap::Mmap;
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use super::chunk_stat::ChunkStat;
|
||||
use super::chunk_store::ChunkStore;
|
||||
use super::index::ChunkReadInfo;
|
||||
use super::read_chunk::ReadChunk;
|
||||
use super::Chunker;
|
||||
use super::IndexFile;
|
||||
use super::{DataBlob, DataChunkBuilder};
|
||||
use crate::tools;
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
|
||||
/// Header format definition for dynamic index files (`.dixd`)
|
||||
#[repr(C)]
|
||||
@ -36,34 +41,34 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
|
||||
// pub data: DynamicIndexHeaderData,
|
||||
// }
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[repr(C)]
|
||||
pub struct DynamicEntry {
|
||||
end_le: u64,
|
||||
digest: [u8; 32],
|
||||
}
|
||||
|
||||
impl DynamicEntry {
|
||||
#[inline]
|
||||
pub fn end(&self) -> u64 {
|
||||
u64::from_le(self.end_le)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DynamicIndexReader {
|
||||
_file: File,
|
||||
pub size: usize,
|
||||
index: *const u8,
|
||||
index_entries: usize,
|
||||
index: Mmap<DynamicEntry>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
// `index` is mmap()ed which cannot be thread-local so should be sendable
|
||||
// FIXME: Introduce an mmap wrapper type for this?
|
||||
unsafe impl Send for DynamicIndexReader {}
|
||||
unsafe impl Sync for DynamicIndexReader {}
|
||||
|
||||
impl Drop for DynamicIndexReader {
|
||||
fn drop(&mut self) {
|
||||
if let Err(err) = self.unmap() {
|
||||
eprintln!("Unable to unmap dynamic index - {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DynamicIndexReader {
|
||||
pub fn open(path: &Path) -> Result<Self, Error> {
|
||||
File::open(path)
|
||||
.map_err(Error::from)
|
||||
.and_then(|file| Self::new(file))
|
||||
.and_then(Self::new)
|
||||
.map_err(|err| format_err!("Unable to open dynamic index {:?} - {}", path, err))
|
||||
}
|
||||
|
||||
@ -74,6 +79,7 @@ impl DynamicIndexReader {
|
||||
bail!("unable to get shared lock - {}", err);
|
||||
}
|
||||
|
||||
// FIXME: This is NOT OUR job! Check the callers of this method and remove this!
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let header_size = std::mem::size_of::<DynamicIndexHeader>();
|
||||
@ -93,123 +99,49 @@ impl DynamicIndexReader {
|
||||
let size = stat.st_size as usize;
|
||||
|
||||
let index_size = size - header_size;
|
||||
if (index_size % 40) != 0 {
|
||||
let index_count = index_size / 40;
|
||||
if index_count * 40 != index_size {
|
||||
bail!("got unexpected file size");
|
||||
}
|
||||
|
||||
let data = unsafe {
|
||||
nix::sys::mman::mmap(
|
||||
std::ptr::null_mut(),
|
||||
index_size,
|
||||
let index = unsafe {
|
||||
Mmap::map_fd(
|
||||
rawfd,
|
||||
header_size as u64,
|
||||
index_count,
|
||||
nix::sys::mman::ProtFlags::PROT_READ,
|
||||
nix::sys::mman::MapFlags::MAP_PRIVATE,
|
||||
rawfd,
|
||||
header_size as i64,
|
||||
)
|
||||
}? as *const u8;
|
||||
)?
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
_file: file,
|
||||
size,
|
||||
index: data,
|
||||
index_entries: index_size / 40,
|
||||
index,
|
||||
ctime,
|
||||
uuid: header.uuid,
|
||||
index_csum: header.index_csum,
|
||||
})
|
||||
}
|
||||
|
||||
fn unmap(&mut self) -> Result<(), Error> {
|
||||
if self.index == std::ptr::null_mut() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(err) = unsafe {
|
||||
nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, self.index_entries * 40)
|
||||
} {
|
||||
bail!("unmap dynamic index failed - {}", err);
|
||||
}
|
||||
|
||||
self.index = std::ptr::null_mut();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
|
||||
if pos >= self.index_entries {
|
||||
bail!("chunk index out of range");
|
||||
}
|
||||
let start = if pos == 0 {
|
||||
0
|
||||
} else {
|
||||
unsafe { *(self.index.add((pos - 1) * 40) as *const u64) }
|
||||
};
|
||||
|
||||
let end = unsafe { *(self.index.add(pos * 40) as *const u64) };
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(
|
||||
self.index.add(pos * 40 + 8),
|
||||
(*digest.as_mut_ptr()).as_mut_ptr(),
|
||||
32,
|
||||
);
|
||||
}
|
||||
|
||||
Ok((start, end, unsafe { digest.assume_init() }))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
fn chunk_end(&self, pos: usize) -> u64 {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
unsafe { *(self.index.add(pos * 40) as *const u64) }
|
||||
self.index[pos].end()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 40 + 8), 32) };
|
||||
slice.try_into().unwrap()
|
||||
&self.index[pos].digest
|
||||
}
|
||||
|
||||
/// Compute checksum and data size
|
||||
pub fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_entries {
|
||||
chunk_end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
csum.update(&chunk_end.to_le_bytes());
|
||||
csum.update(digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
/*
|
||||
pub fn dump_pxar(&self, mut writer: Box<dyn Write>) -> Result<(), Error> {
|
||||
|
||||
for pos in 0..self.index_entries {
|
||||
let _end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
//println!("Dump {:08x}", end );
|
||||
let chunk = self.store.read_chunk(digest)?;
|
||||
// fixme: handle encrypted chunks
|
||||
let data = chunk.decode(None)?;
|
||||
writer.write_all(&data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
*/
|
||||
|
||||
// TODO: can we use std::slice::binary_search with Mmap now?
|
||||
fn binary_search(
|
||||
&self,
|
||||
start_idx: usize,
|
||||
@ -238,11 +170,11 @@ impl DynamicIndexReader {
|
||||
|
||||
impl IndexFile for DynamicIndexReader {
|
||||
fn index_count(&self) -> usize {
|
||||
self.index_entries
|
||||
self.index.len()
|
||||
}
|
||||
|
||||
fn index_digest(&self, pos: usize) -> Option<&[u8; 32]> {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
None
|
||||
} else {
|
||||
Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
|
||||
@ -250,12 +182,59 @@ impl IndexFile for DynamicIndexReader {
|
||||
}
|
||||
|
||||
fn index_bytes(&self) -> u64 {
|
||||
if self.index_entries == 0 {
|
||||
if self.index.is_empty() {
|
||||
0
|
||||
} else {
|
||||
self.chunk_end((self.index_entries - 1) as usize)
|
||||
self.chunk_end(self.index.len() - 1)
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_count() {
|
||||
let info = self.chunk_info(pos).unwrap();
|
||||
chunk_end = info.range.end;
|
||||
csum.update(&chunk_end.to_le_bytes());
|
||||
csum.update(&info.digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
|
||||
if pos >= self.index.len() {
|
||||
return None;
|
||||
}
|
||||
let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };
|
||||
|
||||
let end = self.index[pos].end();
|
||||
|
||||
Some(ChunkReadInfo {
|
||||
range: start..end,
|
||||
digest: self.index[pos].digest.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct CachedChunk {
|
||||
range: Range<u64>,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl CachedChunk {
|
||||
/// Perform sanity checks on the range and data size:
|
||||
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
|
||||
if data.len() as u64 != range.end - range.start {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {})",
|
||||
data.len(),
|
||||
range.end - range.start,
|
||||
);
|
||||
}
|
||||
Ok(Self { range, data })
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedDynamicReader<S> {
|
||||
@ -266,7 +245,7 @@ pub struct BufferedDynamicReader<S> {
|
||||
buffered_chunk_idx: usize,
|
||||
buffered_chunk_start: u64,
|
||||
read_offset: u64,
|
||||
lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
|
||||
lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
|
||||
}
|
||||
|
||||
struct ChunkCacher<'a, S> {
|
||||
@ -274,16 +253,21 @@ struct ChunkCacher<'a, S> {
|
||||
index: &'a DynamicIndexReader,
|
||||
}
|
||||
|
||||
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
|
||||
fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
|
||||
let (start, end, digest) = self.index.chunk_info(index)?;
|
||||
self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
|
||||
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
|
||||
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
|
||||
let info = match self.index.chunk_info(index) {
|
||||
Some(info) => info,
|
||||
None => bail!("chunk index out of range"),
|
||||
};
|
||||
let range = info.range;
|
||||
let data = self.store.read_chunk(&info.digest)?;
|
||||
CachedChunk::new(range, data).map(Some)
|
||||
}
|
||||
}
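
The fetch() implementation above is only called by the LRU cache on a miss; the returned CachedChunk is stored and handed back to the caller. A simplified stand-in for that cacher pattern (hypothetical TinyCache, not the real crate::tools::lru_cache API, which also tracks usage order and capacity):

use std::collections::HashMap;

trait Cacher<K, V> {
    fn fetch(&mut self, key: K) -> Result<Option<V>, anyhow::Error>;
}

struct TinyCache<K, V> {
    map: HashMap<K, V>,
}

impl<K: std::hash::Hash + Eq + Copy, V> TinyCache<K, V> {
    fn access(
        &mut self,
        key: K,
        cacher: &mut dyn Cacher<K, V>,
    ) -> Result<Option<&V>, anyhow::Error> {
        if !self.map.contains_key(&key) {
            match cacher.fetch(key)? {
                Some(value) => { self.map.insert(key, value); }
                None => return Ok(None), // cacher could not produce the value
            }
        }
        Ok(self.map.get(&key))
    }
}
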
|
||||
|
||||
impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
pub fn new(index: DynamicIndexReader, store: S) -> Self {
|
||||
let archive_size = index.chunk_end(index.index_entries - 1);
|
||||
let archive_size = index.index_bytes();
|
||||
Self {
|
||||
store,
|
||||
index,
|
||||
@ -301,7 +285,8 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
}
|
||||
|
||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||
let (start, end, data) = self.lru_cache.access(
|
||||
//let (start, end, data) = self.lru_cache.access(
|
||||
let cached_chunk = self.lru_cache.access(
|
||||
idx,
|
||||
&mut ChunkCacher {
|
||||
store: &mut self.store,
|
||||
@ -309,21 +294,13 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
},
|
||||
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
||||
|
||||
if (*end - *start) != data.len() as u64 {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {}",
|
||||
(*end - *start),
|
||||
data.len()
|
||||
);
|
||||
}
|
||||
|
||||
// fixme: avoid copy
|
||||
self.read_buffer.clear();
|
||||
self.read_buffer.extend_from_slice(&data);
|
||||
self.read_buffer.extend_from_slice(&cached_chunk.data);
|
||||
|
||||
self.buffered_chunk_idx = idx;
|
||||
|
||||
self.buffered_chunk_start = *start;
|
||||
self.buffered_chunk_start = cached_chunk.range.start;
|
||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||
Ok(())
|
||||
}
|
||||
@ -340,7 +317,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
|
||||
|
||||
// optimization for sequential read
|
||||
if buffer_len > 0
|
||||
&& ((self.buffered_chunk_idx + 1) < index.index_entries)
|
||||
&& ((self.buffered_chunk_idx + 1) < index.index.len())
|
||||
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let next_idx = self.buffered_chunk_idx + 1;
|
||||
@ -356,7 +333,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
|
||||
|| (offset < self.buffered_chunk_start)
|
||||
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let end_idx = index.index_entries - 1;
|
||||
let end_idx = index.index.len() - 1;
|
||||
let end = index.chunk_end(end_idx);
|
||||
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
|
||||
self.buffer_chunk(idx)?;
|
||||
@ -383,9 +360,7 @@ impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
|
||||
data.len()
|
||||
};
|
||||
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
|
||||
}
|
||||
buf[0..n].copy_from_slice(&data[0..n]);
|
||||
|
||||
self.read_offset += n as u64;
|
||||
|
||||
@ -417,6 +392,49 @@ impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
|
||||
}
|
||||
}
|
||||
|
||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||
/// async use!
|
||||
///
|
||||
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
||||
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
||||
/// duplicate simultaneous reads over http.
|
||||
#[derive(Clone)]
|
||||
pub struct LocalDynamicReadAt<R: ReadChunk> {
|
||||
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> LocalDynamicReadAt<R> {
|
||||
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(inner)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
|
||||
fn start_read_at<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_cx: &mut Context,
|
||||
buf: &'a mut [u8],
|
||||
offset: u64,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
use std::io::Read;
|
||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||
let mut reader = self.inner.lock().unwrap();
|
||||
reader.seek(SeekFrom::Start(offset))?;
|
||||
Ok(reader.read(buf)?)
|
||||
}))
|
||||
}
|
||||
|
||||
fn poll_complete<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_op: ReadAtOperation<'a>,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
||||
}
|
||||
}
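
The ReadAt implementation above bridges the blocking BufferedDynamicReader into an async interface by locking it and running seek/read inside tokio::task::block_in_place. The same pattern as a minimal sketch over plain std types (illustrative only; needs tokio's multi-threaded runtime, as used elsewhere in this code base):

use std::io::{Read, Seek, SeekFrom};
use std::sync::{Arc, Mutex};

async fn read_at_blocking<R: Read + Seek>(
    reader: &Arc<Mutex<R>>,
    buf: &mut [u8],
    offset: u64,
) -> std::io::Result<usize> {
    // keep the current worker thread while doing blocking I/O
    tokio::task::block_in_place(|| {
        let mut reader = reader.lock().unwrap();
        reader.seek(SeekFrom::Start(offset))?;
        reader.read(buf)
    })
}
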
|
||||
|
||||
|
||||
/// Create dynamic index files (`.dixd`)
|
||||
pub struct DynamicIndexWriter {
|
||||
store: Arc<ChunkStore>,
|
||||
@ -460,9 +478,7 @@ impl DynamicIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = std::time::SystemTime::now()
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)?
|
||||
.as_secs();
|
||||
let ctime = epoch_now_u64()?;
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
|
@ -1,11 +1,10 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use std::convert::TryInto;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
|
||||
use super::chunk_stat::*;
|
||||
use super::chunk_store::*;
|
||||
use super::IndexFile;
|
||||
use crate::tools;
|
||||
use super::{IndexFile, ChunkReadInfo};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
|
||||
use chrono::{Local, TimeZone};
|
||||
use std::fs::File;
|
||||
@ -147,38 +146,6 @@ impl FixedIndexReader {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
|
||||
if pos >= self.index_length {
|
||||
bail!("chunk index out of range");
|
||||
}
|
||||
let start = (pos * self.chunk_size) as u64;
|
||||
let mut end = start + self.chunk_size as u64;
|
||||
|
||||
if end > self.size {
|
||||
end = self.size;
|
||||
}
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(
|
||||
self.index.add(pos * 32),
|
||||
(*digest.as_mut_ptr()).as_mut_ptr(),
|
||||
32,
|
||||
);
|
||||
}
|
||||
|
||||
Ok((start, end, unsafe { digest.assume_init() }))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
|
||||
if pos >= self.index_length {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
|
||||
slice.try_into().unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_end(&self, pos: usize) -> u64 {
|
||||
if pos >= self.index_length {
|
||||
@ -193,20 +160,6 @@ impl FixedIndexReader {
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute checksum and data size
|
||||
pub fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_length {
|
||||
chunk_end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
csum.update(digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
pub fn print_info(&self) {
|
||||
println!("Size: {}", self.size);
|
||||
println!("ChunkSize: {}", self.chunk_size);
|
||||
@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
|
||||
fn index_bytes(&self) -> u64 {
|
||||
self.size
|
||||
}
|
||||
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
|
||||
if pos >= self.index_length {
|
||||
return None;
|
||||
}
|
||||
|
||||
let start = (pos * self.chunk_size) as u64;
|
||||
let mut end = start + self.chunk_size as u64;
|
||||
|
||||
if end > self.size {
|
||||
end = self.size;
|
||||
}
|
||||
|
||||
let digest = self.index_digest(pos).unwrap();
|
||||
Some(ChunkReadInfo {
|
||||
range: start..end,
|
||||
digest: *digest,
|
||||
})
|
||||
}
|
||||
|
||||
fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_count() {
|
||||
let info = self.chunk_info(pos).unwrap();
|
||||
chunk_end = info.range.end;
|
||||
csum.update(&info.digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
}
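
In the fixed index, chunk_info() above derives each range purely from the position: start is pos * chunk_size and end is clamped to the image size, so only the last chunk can be short. A small worked example with assumed sizes (4 MiB chunks, 10 MiB image):

fn fixed_chunk_range(pos: u64, chunk_size: u64, image_size: u64) -> (u64, u64) {
    let start = pos * chunk_size;
    let end = (start + chunk_size).min(image_size); // last chunk may be short
    (start, end)
}

fn main() {
    let chunk = 4 * 1024 * 1024;
    let size = 10 * 1024 * 1024;
    assert_eq!(fixed_chunk_range(0, chunk, size), (0, chunk));
    assert_eq!(fixed_chunk_range(2, chunk, size), (2 * chunk, size)); // clamped to image size
}
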
|
||||
|
||||
pub struct FixedIndexWriter {
|
||||
@ -290,9 +275,7 @@ impl FixedIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = std::time::SystemTime::now()
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)?
|
||||
.as_secs();
|
||||
let ctime = epoch_now_u64()?;
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
@ -469,6 +452,18 @@ impl FixedIndexWriter {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clone_data_from(&mut self, reader: &FixedIndexReader) -> Result<(), Error> {
|
||||
if self.index_length != reader.index_count() {
|
||||
bail!("clone_data_from failed - index sizes not equal");
|
||||
}
|
||||
|
||||
for i in 0..self.index_length {
|
||||
self.add_digest(i, reader.index_digest(i).unwrap())?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedFixedReader<S> {
|
||||
@ -501,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
|
||||
|
||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||
let index = &self.index;
|
||||
let (start, end, digest) = index.chunk_info(idx)?;
|
||||
let info = match index.chunk_info(idx) {
|
||||
Some(info) => info,
|
||||
None => bail!("chunk index out of range"),
|
||||
};
|
||||
|
||||
// fixme: avoid copy
|
||||
|
||||
let data = self.store.read_chunk(&digest)?;
|
||||
|
||||
if (end - start) != data.len() as u64 {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {}",
|
||||
(end - start),
|
||||
data.len()
|
||||
);
|
||||
let data = self.store.read_chunk(&info.digest)?;
|
||||
let size = info.range.end - info.range.start;
|
||||
if size != data.len() as u64 {
|
||||
bail!("read chunk with wrong size ({} != {}", size, data.len());
|
||||
}
|
||||
|
||||
self.read_buffer.clear();
|
||||
@ -520,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
|
||||
|
||||
self.buffered_chunk_idx = idx;
|
||||
|
||||
self.buffered_chunk_start = start as u64;
|
||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||
self.buffered_chunk_start = info.range.start as u64;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,17 @@
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::ops::Range;
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::*;
|
||||
pub struct ChunkReadInfo {
|
||||
pub range: Range<u64>,
|
||||
pub digest: [u8; 32],
|
||||
}
|
||||
|
||||
impl ChunkReadInfo {
|
||||
#[inline]
|
||||
pub fn size(&self) -> u64 {
|
||||
self.range.end - self.range.start
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait to get digest list from index files
|
||||
///
|
||||
@ -13,6 +20,10 @@ pub trait IndexFile {
|
||||
fn index_count(&self) -> usize;
|
||||
fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
|
||||
fn index_bytes(&self) -> u64;
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
|
||||
|
||||
/// Compute index checksum and size
|
||||
fn compute_csum(&self) -> ([u8; 32], u64);
|
||||
|
||||
/// Returns most often used chunks
|
||||
fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
|
||||
@ -46,111 +57,3 @@ pub trait IndexFile {
|
||||
map
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode digest list from an `IndexFile` into a binary stream
|
||||
///
|
||||
/// The reader simply returns a binary stream of 32 byte digest values.
|
||||
pub struct DigestListEncoder {
|
||||
index: Box<dyn IndexFile + Send + Sync>,
|
||||
pos: usize,
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl DigestListEncoder {
|
||||
|
||||
pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
|
||||
let count = index.index_count();
|
||||
Self { index, pos: 0, count }
|
||||
}
|
||||
}
|
||||
|
||||
impl std::io::Read for DigestListEncoder {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
||||
if buf.len() < 32 {
|
||||
panic!("read buffer too small");
|
||||
}
|
||||
|
||||
if self.pos < self.count {
|
||||
let mut written = 0;
|
||||
loop {
|
||||
let digest = self.index.index_digest(self.pos).unwrap();
|
||||
buf[written..(written + 32)].copy_from_slice(digest);
|
||||
self.pos += 1;
|
||||
written += 32;
|
||||
if self.pos >= self.count {
|
||||
break;
|
||||
}
|
||||
if (written + 32) >= buf.len() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(written)
|
||||
} else {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
}
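
The read() implementation above packs as many whole 32-byte digests as fit into the caller's buffer and returns 0 once every index entry has been emitted. A hedged usage sketch that drains such an encoder into a Vec (assumes the DigestListEncoder type as defined above):

use std::io::Read;

fn collect_digests(mut encoder: DigestListEncoder) -> std::io::Result<Vec<[u8; 32]>> {
    let mut digests = Vec::new();
    let mut buf = [0u8; 64 * 32]; // room for 64 digests per read() call
    loop {
        let n = encoder.read(&mut buf)?;
        if n == 0 {
            break; // all index entries emitted
        }
        // n is always a multiple of 32, one digest per 32-byte slice
        for chunk in buf[..n].chunks_exact(32) {
            let mut digest = [0u8; 32];
            digest.copy_from_slice(chunk);
            digests.push(digest);
        }
    }
    Ok(digests)
}
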
|
||||
|
||||
/// Decodes a Stream<Item=Bytes> into a Stream<Item=[u8; 32]>
|
||||
///
|
||||
/// The reader simply returns a binary stream of 32 byte digest values.
|
||||
|
||||
pub struct DigestListDecoder<S: Unpin> {
|
||||
input: S,
|
||||
buffer: BytesMut,
|
||||
}
|
||||
|
||||
impl<S: Unpin> DigestListDecoder<S> {
|
||||
pub fn new(input: S) -> Self {
|
||||
Self { input, buffer: BytesMut::new() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Unpin> Unpin for DigestListDecoder<S> {}
|
||||
|
||||
impl<S: Unpin, E> Stream for DigestListDecoder<S>
|
||||
where
|
||||
S: Stream<Item=Result<Bytes, E>>,
|
||||
E: Into<Error>,
|
||||
{
|
||||
type Item = Result<[u8; 32], Error>;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
if this.buffer.len() >= 32 {
|
||||
let left = this.buffer.split_to(32);
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
(*digest.as_mut_ptr()).copy_from_slice(&left[..]);
|
||||
return Poll::Ready(Some(Ok(digest.assume_init())));
|
||||
}
|
||||
}
|
||||
|
||||
match Pin::new(&mut this.input).poll_next(cx) {
|
||||
Poll::Pending => {
|
||||
return Poll::Pending;
|
||||
}
|
||||
Poll::Ready(Some(Err(err))) => {
|
||||
return Poll::Ready(Some(Err(err.into())));
|
||||
}
|
||||
Poll::Ready(Some(Ok(data))) => {
|
||||
this.buffer.extend_from_slice(&data);
|
||||
// continue
|
||||
}
|
||||
Poll::Ready(None) => {
|
||||
let rest = this.buffer.len();
|
||||
if rest == 0 {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
return Poll::Ready(Some(Err(format_err!(
|
||||
"got small digest ({} != 32).",
|
||||
rest,
|
||||
))));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
|
||||
|
||||
pub struct FileInfo {
|
||||
pub filename: String,
|
||||
pub encrypted: Option<bool>,
|
||||
pub size: u64,
|
||||
pub csum: [u8; 32],
|
||||
}
|
||||
@ -48,9 +49,9 @@ impl BackupManifest {
|
||||
Self { files: Vec::new(), snapshot }
|
||||
}
|
||||
|
||||
pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32]) -> Result<(), Error> {
|
||||
pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
|
||||
let _archive_type = archive_type(&filename)?; // check type
|
||||
self.files.push(FileInfo { filename, size, csum });
|
||||
self.files.push(FileInfo { filename, size, csum, encrypted });
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -90,11 +91,18 @@ impl BackupManifest {
|
||||
"backup-time": self.snapshot.backup_time().timestamp(),
|
||||
"files": self.files.iter()
|
||||
.fold(Vec::new(), |mut acc, info| {
|
||||
acc.push(json!({
|
||||
let mut value = json!({
|
||||
"filename": info.filename,
|
||||
"encrypted": info.encrypted,
|
||||
"size": info.size,
|
||||
"csum": proxmox::tools::digest_to_hex(&info.csum),
|
||||
}));
|
||||
});
|
||||
|
||||
if let Some(encrypted) = info.encrypted {
|
||||
value["encrypted"] = encrypted.into();
|
||||
}
|
||||
|
||||
acc.push(value);
|
||||
acc
|
||||
})
|
||||
})
|
||||
@ -134,7 +142,8 @@ impl TryFrom<Value> for BackupManifest {
|
||||
let csum = required_string_property(item, "csum")?;
|
||||
let csum = proxmox::tools::hex_to_digest(csum)?;
|
||||
let size = required_integer_property(item, "size")? as u64;
|
||||
manifest.add_file(filename, size, csum)?;
|
||||
let encrypted = item["encrypted"].as_bool();
|
||||
manifest.add_file(filename, size, csum, encrypted)?;
|
||||
}
|
||||
|
||||
if manifest.files().is_empty() {
|
||||
|
@ -1,38 +1,39 @@
|
||||
use anyhow::{Error};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::datastore::*;
|
||||
use super::crypt_config::*;
|
||||
use super::data_blob::*;
|
||||
use anyhow::Error;
|
||||
|
||||
use super::crypt_config::CryptConfig;
|
||||
use super::data_blob::DataBlob;
|
||||
use super::datastore::DataStore;
|
||||
|
||||
/// The ReadChunk trait allows reading backup data chunks (local or remote)
|
||||
pub trait ReadChunk {
|
||||
/// Returns the encoded chunk data
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error>;
|
||||
fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
|
||||
|
||||
/// Returns the decoded chunk data
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error>;
|
||||
fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct LocalChunkReader {
|
||||
store: Arc<DataStore>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
}
|
||||
|
||||
impl LocalChunkReader {
|
||||
|
||||
pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
|
||||
Self { store, crypt_config }
|
||||
Self {
|
||||
store,
|
||||
crypt_config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadChunk for LocalChunkReader {
|
||||
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
|
||||
|
||||
let digest_str = proxmox::tools::digest_to_hex(digest);
|
||||
println!("READ CHUNK {}", digest_str);
|
||||
|
||||
fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
@ -41,8 +42,8 @@ impl ReadChunk for LocalChunkReader {
|
||||
Ok(chunk)
|
||||
}
|
||||
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
let chunk = self.read_raw_chunk(digest)?;
|
||||
fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
@ -51,3 +52,49 @@ impl ReadChunk for LocalChunkReader {
|
||||
Ok(raw_data)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AsyncReadChunk: Send {
|
||||
/// Returns the encoded chunk data
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;
|
||||
|
||||
/// Returns the decoded chunk data
|
||||
fn read_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
|
||||
}
|
||||
|
||||
impl AsyncReadChunk for LocalChunkReader {
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
|
||||
Box::pin(async move{
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
|
||||
let raw_data = tokio::fs::read(&path).await?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
Ok(chunk)
|
||||
})
|
||||
}
|
||||
|
||||
fn read_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
|
||||
Box::pin(async move {
|
||||
let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
Ok(raw_data)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
src/backup/verify.rs | 196 lines (new file)
@ -0,0 +1,196 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use super::{
|
||||
DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
|
||||
ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
|
||||
FileInfo, ArchiveType, archive_type,
|
||||
};
|
||||
|
||||
fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
||||
|
||||
let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
|
||||
|
||||
let csum = openssl::sha::sha256(blob.raw_data());
|
||||
if raw_size != info.size {
|
||||
bail!("wrong size ({} != {})", info.size, raw_size);
|
||||
}
|
||||
|
||||
if csum != info.csum {
|
||||
bail!("wrong index checksum");
|
||||
}
|
||||
|
||||
blob.verify_crc()?;
|
||||
|
||||
let magic = blob.magic();
|
||||
|
||||
if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
blob.decode(None)?;
|
||||
|
||||
Ok(())
|
||||
}
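
verify_blob() above checks the raw size, the SHA-256 of the raw blob data against the manifest checksum, the CRC, and finally decodes unencrypted blobs. A standalone sketch of just the size and checksum part (hypothetical helper, not part of this patch):

fn check_blob(raw: &[u8], expected_size: u64, expected_csum: &[u8; 32]) -> Result<(), anyhow::Error> {
    if raw.len() as u64 != expected_size {
        anyhow::bail!("wrong size ({} != {})", expected_size, raw.len());
    }
    if openssl::sha::sha256(raw) != *expected_csum {
        anyhow::bail!("wrong index checksum");
    }
    Ok(())
}
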
|
||||
|
||||
fn verify_index_chunks(
|
||||
datastore: &DataStore,
|
||||
index: Box<dyn IndexFile>,
|
||||
worker: &WorkerTask,
|
||||
) -> Result<(), Error> {
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.range.end - info.range.start;
|
||||
datastore.verify_stored_chunk(&info.digest, size)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
|
||||
|
||||
let mut path = backup_dir.relative_path();
|
||||
path.push(&info.filename);
|
||||
|
||||
let index = datastore.open_fixed_reader(&path)?;
|
||||
|
||||
let (csum, size) = index.compute_csum();
|
||||
if size != info.size {
|
||||
bail!("wrong size ({} != {})", info.size, size);
|
||||
}
|
||||
|
||||
if csum != info.csum {
|
||||
bail!("wrong index checksum");
|
||||
}
|
||||
|
||||
verify_index_chunks(datastore, Box::new(index), worker)
|
||||
}
|
||||
|
||||
fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
|
||||
let mut path = backup_dir.relative_path();
|
||||
path.push(&info.filename);
|
||||
|
||||
let index = datastore.open_dynamic_reader(&path)?;
|
||||
|
||||
let (csum, size) = index.compute_csum();
|
||||
if size != info.size {
|
||||
bail!("wrong size ({} != {})", info.size, size);
|
||||
}
|
||||
|
||||
if csum != info.csum {
|
||||
bail!("wrong index checksum");
|
||||
}
|
||||
|
||||
verify_index_chunks(datastore, Box::new(index), worker)
|
||||
}
|
||||
|
||||
/// Verify a single backup snapshot
|
||||
///
|
||||
/// This checks all archives inside a backup snapshot.
|
||||
/// Errors are logged to the worker log.
|
||||
///
|
||||
/// Returns
|
||||
/// - Ok(true) if verify is successful
|
||||
/// - Ok(false) if there were verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
|
||||
|
||||
let manifest = match datastore.load_manifest(&backup_dir) {
|
||||
Ok((manifest, _)) => manifest,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify {}:{}", datastore.name(), backup_dir));
|
||||
|
||||
let mut error_count = 0;
|
||||
|
||||
for info in manifest.files() {
|
||||
let result = proxmox::try_block!({
|
||||
worker.log(format!(" check {}", info.filename));
|
||||
match archive_type(&info.filename)? {
|
||||
ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
|
||||
ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
|
||||
ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
|
||||
}
|
||||
});
|
||||
|
||||
worker.fail_on_abort()?;
|
||||
|
||||
if let Err(err) = result {
|
||||
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
|
||||
error_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
|
||||
|
||||
/// Verify all backups inside a backup group
|
||||
///
|
||||
/// Errors are logged to the worker log.
|
||||
///
|
||||
/// Returns
|
||||
/// - Ok(true) if verify is successful
|
||||
/// - Ok(false) if there were verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
|
||||
|
||||
let mut list = match group.list_backups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify group {}:{}", datastore.name(), group));
|
||||
|
||||
let mut error_count = 0;
|
||||
|
||||
BackupInfo::sort_list(&mut list, false); // newest first
|
||||
for info in list {
|
||||
if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
|
||||
error_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
|
||||
|
||||
/// Verify all backups inside a datastore
|
||||
///
|
||||
/// Errors are logged to the worker log.
|
||||
///
|
||||
/// Returns
|
||||
/// - Ok(true) if verify is successful
|
||||
/// - Ok(false) if there were verification errors
|
||||
/// - Err(_) if task was aborted
|
||||
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
|
||||
|
||||
let list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||
Ok(list) => list,
|
||||
Err(err) => {
|
||||
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
worker.log(format!("verify datastore {}", datastore.name()));
|
||||
|
||||
let mut error_count = 0;
|
||||
for group in list {
|
||||
if !verify_backup_group(datastore, &group, worker)? {
|
||||
error_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(error_count == 0)
|
||||
}
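
All three verify_* functions follow the same convention: Ok(true) on success, Ok(false) when verification errors were found (and already logged to the worker log), and Err only when the task was aborted. A sketch of how a caller might map that tri-state result, assuming the DataStore and WorkerTask types used above (run_verify is a hypothetical name):

fn run_verify(datastore: &DataStore, worker: &WorkerTask) -> Result<(), anyhow::Error> {
    match verify_all_backups(datastore, worker) {
        Ok(true) => Ok(()),                // everything verified
        Ok(false) => anyhow::bail!("verification failed - please check the log for details"),
        Err(err) => Err(err),              // task was aborted
    }
}
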
|
@ -14,6 +14,8 @@ use proxmox_backup::config;
|
||||
use proxmox_backup::buildcfg;
|
||||
|
||||
fn main() {
|
||||
proxmox_backup::tools::setup_safe_path_env();
|
||||
|
||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
||||
eprintln!("Error: {}", err);
|
||||
std::process::exit(-1);
|
||||
|
@ -1,13 +1,25 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::unistd::{fork, ForkResult, pipe};
|
||||
use std::os::unix::io::RawFd;
|
||||
use chrono::{Local, DateTime, Utc, TimeZone};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::ffi::OsStr;
|
||||
use std::io::{Write, Seek, SeekFrom};
|
||||
use std::io::{self, Write, Seek, SeekFrom};
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, DateTime, Utc, TimeZone};
|
||||
use futures::future::FutureExt;
|
||||
use futures::select;
|
||||
use futures::stream::{StreamExt, TryStreamExt};
|
||||
use nix::unistd::{fork, ForkResult, pipe};
|
||||
use serde_json::{json, Value};
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
use tokio::sync::mpsc;
|
||||
use xdg::BaseDirectories;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
|
||||
use proxmox::sys::linux::tty;
|
||||
@ -15,21 +27,38 @@ use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
use proxmox_backup::api2::types::*;
|
||||
use proxmox_backup::client::*;
|
||||
use proxmox_backup::backup::*;
|
||||
use proxmox_backup::pxar::{ self, catalog::* };
|
||||
|
||||
use serde_json::{json, Value};
|
||||
//use hyper::Body;
|
||||
use std::sync::{Arc, Mutex};
|
||||
//use regex::Regex;
|
||||
use xdg::BaseDirectories;
|
||||
|
||||
use futures::*;
|
||||
use tokio::sync::mpsc;
|
||||
use proxmox_backup::pxar::catalog::*;
|
||||
use proxmox_backup::backup::{
|
||||
archive_type,
|
||||
encrypt_key_with_passphrase,
|
||||
load_and_decrypt_key,
|
||||
store_key_config,
|
||||
verify_chunk_size,
|
||||
ArchiveType,
|
||||
AsyncReadChunk,
|
||||
BackupDir,
|
||||
BackupGroup,
|
||||
BackupManifest,
|
||||
BufferedDynamicReader,
|
||||
CatalogReader,
|
||||
CatalogWriter,
|
||||
CATALOG_NAME,
|
||||
ChunkStream,
|
||||
CryptConfig,
|
||||
DataBlob,
|
||||
DynamicIndexReader,
|
||||
FixedChunkStream,
|
||||
FixedIndexReader,
|
||||
IndexFile,
|
||||
KeyConfig,
|
||||
MANIFEST_BLOB_NAME,
|
||||
Shell,
|
||||
};
|
||||
|
||||
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
|
||||
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
|
||||
@ -232,18 +261,17 @@ async fn api_datastore_latest_snapshot(
|
||||
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
|
||||
}
|
||||
|
||||
|
||||
async fn backup_directory<P: AsRef<Path>>(
|
||||
client: &BackupWriter,
|
||||
previous_manifest: Option<Arc<BackupManifest>>,
|
||||
dir_path: P,
|
||||
archive_name: &str,
|
||||
chunk_size: Option<usize>,
|
||||
device_set: Option<HashSet<u64>>,
|
||||
verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
|
||||
exclude_pattern: Vec<pxar::MatchPattern>,
|
||||
exclude_pattern: Vec<MatchEntry>,
|
||||
entries_max: usize,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
@ -271,7 +299,7 @@ async fn backup_directory<P: AsRef<Path>>(
|
||||
});
|
||||
|
||||
let stats = client
|
||||
.upload_stream(archive_name, stream, "dynamic", None, crypt_config)
|
||||
.upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
|
||||
.await?;
|
||||
|
||||
Ok(stats)
|
||||
@ -279,12 +307,12 @@ async fn backup_directory<P: AsRef<Path>>(
|
||||
|
||||
async fn backup_image<P: AsRef<Path>>(
|
||||
client: &BackupWriter,
|
||||
previous_manifest: Option<Arc<BackupManifest>>,
|
||||
image_path: P,
|
||||
archive_name: &str,
|
||||
image_size: u64,
|
||||
chunk_size: Option<usize>,
|
||||
_verbose: bool,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
let path = image_path.as_ref().to_owned();
|
||||
@ -297,7 +325,7 @@ async fn backup_image<P: AsRef<Path>>(
|
||||
let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
|
||||
|
||||
let stats = client
|
||||
.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
|
||||
.upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
|
||||
.await?;
|
||||
|
||||
Ok(stats)
|
||||
@ -399,8 +427,8 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
|
||||
let group = if let Some(path) = param["group"].as_str() {
|
||||
Some(BackupGroup::parse(path)?)
|
||||
let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
|
||||
Some(path.parse()?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@ -417,7 +445,11 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
|
||||
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
|
||||
Ok(tools::format::render_backup_file_list(&item.files))
|
||||
let mut filenames = Vec::new();
|
||||
for file in &item.files {
|
||||
filenames.push(file.filename.to_string());
|
||||
}
|
||||
Ok(tools::format::render_backup_file_list(&filenames[..]))
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
@ -456,7 +488,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
|
||||
let path = tools::required_string_param(&param, "snapshot")?;
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
|
||||
@ -536,7 +568,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
|
||||
let path = tools::required_string_param(&param, "snapshot")?;
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
|
||||
|
||||
@ -614,7 +646,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
|
||||
let path = tools::required_string_param(&param, "snapshot")?;
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let output_format = get_output_format(&param);
|
||||
|
||||
@ -676,8 +708,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
|
||||
}
|
||||
|
||||
fn spawn_catalog_upload(
|
||||
client: Arc<BackupWriter>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
client: Arc<BackupWriter>
|
||||
) -> Result<
|
||||
(
|
||||
Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
|
||||
@ -695,7 +726,7 @@ fn spawn_catalog_upload(
|
||||
|
||||
tokio::spawn(async move {
|
||||
let catalog_upload_result = client
|
||||
.upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
|
||||
.upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
|
||||
.await;
|
||||
|
||||
if let Err(ref err) = catalog_upload_result {
|
||||
@ -769,7 +800,7 @@ fn spawn_catalog_upload(
|
||||
type: Integer,
|
||||
description: "Max number of entries to hold in memory.",
|
||||
optional: true,
|
||||
default: pxar::ENCODER_MAX_ENTRIES as isize,
|
||||
default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
|
||||
},
|
||||
"verbose": {
|
||||
type: Boolean,
|
||||
@ -812,17 +843,19 @@ async fn create_backup(
|
||||
|
||||
let include_dev = param["include-dev"].as_array();
|
||||
|
||||
let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
|
||||
let entries_max = param["entries-max"].as_u64()
|
||||
.unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
|
||||
|
||||
let empty = Vec::new();
|
||||
let arg_pattern = param["exclude"].as_array().unwrap_or(&empty);
|
||||
let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
|
||||
|
||||
let mut pattern_list = Vec::with_capacity(arg_pattern.len());
|
||||
for s in arg_pattern {
|
||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
||||
pattern_list.push(p);
|
||||
let mut pattern_list = Vec::with_capacity(exclude_args.len());
|
||||
for entry in exclude_args {
|
||||
let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
pattern_list.push(
|
||||
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
|
||||
.map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
|
||||
);
|
||||
}
|
||||
|
||||
let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
|
||||
@ -844,8 +877,6 @@ async fn create_backup(
|
||||
|
||||
let mut upload_list = vec![];
|
||||
|
||||
let mut upload_catalog = false;
|
||||
|
||||
for backupspec in backupspec_list {
|
||||
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
|
||||
let filename = &spec.config_string;
|
||||
@ -863,7 +894,6 @@ async fn create_backup(
|
||||
bail!("got unexpected file type (expected directory)");
|
||||
}
|
||||
upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
|
||||
upload_catalog = true;
|
||||
}
|
||||
BackupSpecificationType::IMAGE => {
|
||||
if !(file_type.is_file() || file_type.is_block_device()) {
|
||||
@ -923,8 +953,11 @@ async fn create_backup(
|
||||
}
|
||||
};
|
||||
|
||||
let is_encrypted = Some(crypt_config.is_some());
|
||||
|
||||
let client = BackupWriter::start(
|
||||
client,
|
||||
crypt_config.clone(),
|
||||
repo.store(),
|
||||
backup_type,
|
||||
&backup_id,
|
||||
@ -932,64 +965,79 @@ async fn create_backup(
|
||||
verbose,
|
||||
).await?;
|
||||
|
||||
let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
|
||||
Some(Arc::new(previous_manifest))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
|
||||
let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
|
||||
let mut catalog = None;
|
||||
let mut catalog_result_tx = None;
|
||||
|
||||
for (backup_type, filename, target, size) in upload_list {
|
||||
match backup_type {
|
||||
BackupSpecificationType::CONFIG => {
|
||||
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
|
||||
let stats = client
|
||||
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
|
||||
.upload_blob_from_file(&filename, &target, true, Some(true))
|
||||
.await?;
|
||||
manifest.add_file(target, stats.size, stats.csum)?;
|
||||
manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
|
||||
}
|
||||
BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
|
||||
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
|
||||
let stats = client
|
||||
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
|
||||
.upload_blob_from_file(&filename, &target, true, Some(true))
|
||||
.await?;
|
||||
manifest.add_file(target, stats.size, stats.csum)?;
|
||||
manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
|
||||
}
|
||||
BackupSpecificationType::PXAR => {
|
||||
// start catalog upload on first use
|
||||
if catalog.is_none() {
|
||||
let (cat, res) = spawn_catalog_upload(client.clone())?;
|
||||
catalog = Some(cat);
|
||||
catalog_result_tx = Some(res);
|
||||
}
|
||||
let catalog = catalog.as_ref().unwrap();
|
||||
|
||||
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
|
||||
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
|
||||
let stats = backup_directory(
|
||||
&client,
|
||||
previous_manifest.clone(),
|
||||
&filename,
|
||||
&target,
|
||||
chunk_size_opt,
|
||||
devices.clone(),
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
crypt_config.clone(),
|
||||
catalog.clone(),
|
||||
pattern_list.clone(),
|
||||
entries_max as usize,
|
||||
).await?;
|
||||
manifest.add_file(target, stats.size, stats.csum)?;
|
||||
manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
|
||||
catalog.lock().unwrap().end_directory()?;
|
||||
}
|
||||
BackupSpecificationType::IMAGE => {
|
||||
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
|
||||
let stats = backup_image(
|
||||
&client,
|
||||
previous_manifest.clone(),
|
||||
&filename,
|
||||
&target,
|
||||
size,
|
||||
chunk_size_opt,
|
||||
verbose,
|
||||
crypt_config.clone(),
|
||||
).await?;
|
||||
manifest.add_file(target, stats.size, stats.csum)?;
|
||||
manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finalize and upload catalog
|
||||
if upload_catalog {
|
||||
if let Some(catalog) = catalog {
|
||||
let mutex = Arc::try_unwrap(catalog)
|
||||
.map_err(|_| format_err!("unable to get catalog (still used)"))?;
|
||||
let mut catalog = mutex.into_inner().unwrap();
|
||||
@ -998,18 +1046,19 @@ async fn create_backup(
|
||||
|
||||
drop(catalog); // close upload stream
|
||||
|
||||
if let Some(catalog_result_rx) = catalog_result_tx {
|
||||
let stats = catalog_result_rx.await??;
|
||||
|
||||
manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
|
||||
manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
|
||||
let target = "rsa-encrypted.key";
|
||||
println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
|
||||
let stats = client
|
||||
.upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
|
||||
.upload_blob_from_data(rsa_encrypted_key, target, false, None)
|
||||
.await?;
|
||||
manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
|
||||
manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;
|
||||
|
||||
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
|
||||
/*
|
||||
@ -1027,7 +1076,7 @@ async fn create_backup(
|
||||
println!("Upload index.json to '{:?}'", repo);
|
||||
let manifest = serde_json::to_string_pretty(&manifest)?.into();
|
||||
client
|
||||
.upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
|
||||
.upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
|
||||
.await?;
|
||||
|
||||
client.finish().await?;
|
||||
@ -1062,7 +1111,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<Str
|
||||
result
|
||||
}
|
||||
|
||||
fn dump_image<W: Write>(
|
||||
async fn dump_image<W: Write>(
|
||||
client: Arc<BackupReader>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
index: FixedIndexReader,
|
||||
@ -1082,7 +1131,7 @@ fn dump_image<W: Write>(
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
let digest = index.index_digest(pos).unwrap();
|
||||
let raw_data = chunk_reader.read_chunk(&digest)?;
|
||||
let raw_data = chunk_reader.read_chunk(&digest).await?;
|
||||
writer.write_all(&raw_data)?;
|
||||
bytes += raw_data.len();
|
||||
if verbose {
|
||||
@ -1171,10 +1220,10 @@ async fn restore(param: Value) -> Result<Value, Error> {
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
|
||||
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
|
||||
let group = BackupGroup::parse(path)?;
|
||||
let group: BackupGroup = path.parse()?;
|
||||
api_datastore_latest_snapshot(&client, repo.store(), group).await?
|
||||
} else {
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
|
||||
};
|
||||
|
||||
@ -1246,18 +1295,19 @@ async fn restore(param: Value) -> Result<Value, Error> {
|
||||
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
|
||||
if let Some(target) = target {
|
||||
|
||||
let feature_flags = pxar::flags::DEFAULT;
|
||||
let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
|
||||
decoder.set_callback(move |path| {
|
||||
proxmox_backup::pxar::extract_archive(
|
||||
pxar::decoder::Decoder::from_std(reader)?,
|
||||
Path::new(target),
|
||||
&[],
|
||||
proxmox_backup::pxar::Flags::DEFAULT,
|
||||
allow_existing_dirs,
|
||||
|path| {
|
||||
if verbose {
|
||||
eprintln!("{:?}", path);
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
decoder.set_allow_existing_dirs(allow_existing_dirs);
|
||||
|
||||
decoder.restore(Path::new(target), &Vec::new())?;
|
||||
},
|
||||
)
|
||||
.map_err(|err| format_err!("error extracting archive - {}", err))?;
|
||||
} else {
|
||||
let mut writer = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
@ -1285,7 +1335,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
|
||||
.map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
|
||||
};
|
||||
|
||||
dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
|
||||
dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
@ -1320,7 +1370,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
|
||||
let snapshot = tools::required_string_param(&param, "snapshot")?;
|
||||
let snapshot = BackupDir::parse(snapshot)?;
|
||||
let snapshot: BackupDir = snapshot.parse()?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
|
||||
@ -1394,7 +1444,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
|
||||
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
|
||||
|
||||
let group = tools::required_string_param(&param, "group")?;
|
||||
let group = BackupGroup::parse(group)?;
|
||||
let group: BackupGroup = group.parse()?;
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
@ -1628,9 +1678,9 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
|
||||
_ => return result,
|
||||
};
|
||||
|
||||
let snapshot = match param.get("snapshot") {
|
||||
let snapshot: BackupDir = match param.get("snapshot") {
|
||||
Some(path) => {
|
||||
match BackupDir::parse(path) {
|
||||
match path.parse() {
|
||||
Ok(v) => v,
|
||||
_ => return result,
|
||||
}
|
||||
@ -1966,6 +2016,48 @@ fn mount(
|
||||
}
|
||||
}
|
||||
|
||||
use proxmox_backup::client::RemoteChunkReader;
|
||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||
/// async use!
|
||||
///
|
||||
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
||||
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
||||
/// duplicate simultaneous reads over http.
|
||||
struct BufferedDynamicReadAt {
|
||||
inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
|
||||
}
|
||||
|
||||
impl BufferedDynamicReadAt {
|
||||
fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(inner),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadAt for BufferedDynamicReadAt {
|
||||
fn start_read_at<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_cx: &mut Context,
|
||||
buf: &'a mut [u8],
|
||||
offset: u64,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
use std::io::Read;
|
||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||
let mut reader = self.inner.lock().unwrap();
|
||||
reader.seek(SeekFrom::Start(offset))?;
|
||||
Ok(reader.read(buf)?)
|
||||
}))
|
||||
}
|
||||
|
||||
fn poll_complete<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_op: ReadAtOperation<'a>,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
||||
}
|
||||
}
|
||||
|
||||
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(&param)?;
|
||||
let archive_name = tools::required_string_param(&param, "archive-name")?;
|
||||
@ -1976,10 +2068,10 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
|
||||
let path = tools::required_string_param(&param, "snapshot")?;
|
||||
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
|
||||
let group = BackupGroup::parse(path)?;
|
||||
let group: BackupGroup = path.parse()?;
|
||||
api_datastore_latest_snapshot(&client, repo.store(), group).await?
|
||||
} else {
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
|
||||
};
|
||||
|
||||
@ -2015,15 +2107,19 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let decoder = pxar::Decoder::new(reader)?;
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
let options = OsStr::new("ro,default_permissions");
|
||||
let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
// Mount the session but do not call fuse daemonize as this will cause
|
||||
// issues with the runtime after the fork
|
||||
let deamonize = false;
|
||||
session.mount(&Path::new(target), deamonize)?;
|
||||
let session = proxmox_backup::pxar::fuse::Session::mount(
|
||||
decoder,
|
||||
&options,
|
||||
false,
|
||||
Path::new(target),
|
||||
)
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
if let Some(pipe) = pipe {
|
||||
nix::unistd::chdir(Path::new("/")).unwrap();
|
||||
@ -2045,8 +2141,13 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
nix::unistd::close(pipe).unwrap();
|
||||
}
|
||||
|
||||
let multithreaded = true;
|
||||
session.run_loop(multithreaded)?;
|
||||
let mut interrupt = signal(SignalKind::interrupt())?;
|
||||
select! {
|
||||
res = session.fuse() => res?,
|
||||
_ = interrupt.recv().fuse() => {
|
||||
// exit on interrupted
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bail!("unknown archive file extension (expected .pxar)");
|
||||
}
|
||||
@ -2085,10 +2186,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
let archive_name = tools::required_string_param(&param, "archive-name")?;
|
||||
|
||||
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
|
||||
let group = BackupGroup::parse(path)?;
|
||||
let group: BackupGroup = path.parse()?;
|
||||
api_datastore_latest_snapshot(&client, repo.store(), group).await?
|
||||
} else {
|
||||
let snapshot = BackupDir::parse(path)?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
|
||||
};
|
||||
|
||||
@ -2117,7 +2218,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
true,
|
||||
).await?;
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
@ -2129,13 +2230,12 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let mut decoder = pxar::Decoder::new(reader)?;
|
||||
decoder.set_callback(|path| {
|
||||
println!("{:?}", path);
|
||||
Ok(())
|
||||
});
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
|
||||
let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
|
||||
client.download(CATALOG_NAME, &mut tmpfile).await?;
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read catalog index - {}", err))?;
|
||||
|
||||
@ -2161,10 +2261,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
catalog_reader,
|
||||
&server_archive_name,
|
||||
decoder,
|
||||
)?;
|
||||
).await?;
|
||||
|
||||
println!("Starting interactive shell");
|
||||
state.shell()?;
|
||||
state.shell().await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
|
@ -32,6 +32,24 @@ async fn view_task_result(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Note: local workers should print logs to stdout, so there is no need
|
||||
// to fetch/display logs. We just wait for the worker to finish.
|
||||
pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
|
||||
|
||||
let upid: proxmox_backup::server::UPID = upid_str.parse()?;
|
||||
|
||||
let sleep_duration = core::time::Duration::new(0, 100_000_000);
|
||||
|
||||
loop {
|
||||
if proxmox_backup::server::worker_is_active_local(&upid) {
|
||||
tokio::time::delay_for(sleep_duration).await;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn connect() -> Result<HttpClient, Error> {
|
||||
|
||||
let uid = nix::unistd::Uid::current();
|
||||
@ -301,11 +319,48 @@ async fn pull_datastore(
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"store": {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Verify backups
|
||||
async fn verify(
|
||||
store: String,
|
||||
param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(&param);
|
||||
|
||||
let mut client = connect()?;
|
||||
|
||||
let args = json!({});
|
||||
|
||||
let path = format!("api2/json/admin/datastore/{}/verify", store);
|
||||
|
||||
let result = client.post(&path, Some(args)).await?;
|
||||
|
||||
view_task_result(client, result, &output_format).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
fn main() {
|
||||
|
||||
proxmox_backup::tools::setup_safe_path_env();
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("acl", acl_commands())
|
||||
.insert("datastore", datastore_commands())
|
||||
.insert("disk", disk_commands())
|
||||
.insert("dns", dns_commands())
|
||||
.insert("network", network_commands())
|
||||
.insert("user", user_commands())
|
||||
@ -321,8 +376,16 @@ fn main() {
|
||||
.completion_cb("local-store", config::datastore::complete_datastore_name)
|
||||
.completion_cb("remote", config::remote::complete_remote_name)
|
||||
.completion_cb("remote-store", complete_remote_datastore_name)
|
||||
)
|
||||
.insert(
|
||||
"verify",
|
||||
CliCommand::new(&API_METHOD_VERIFY)
|
||||
.arg_param(&["store"])
|
||||
.completion_cb("store", config::datastore::complete_datastore_name)
|
||||
);
|
||||
|
||||
|
||||
|
||||
let mut rpcenv = CliEnvironment::new();
|
||||
rpcenv.set_user(Some(String::from("root@pam")));
|
||||
|
||||
|
@ -12,12 +12,14 @@ use proxmox::api::RpcEnvironmentType;
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::buildcfg;
|
||||
use proxmox_backup::server;
|
||||
use proxmox_backup::tools::daemon;
|
||||
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
|
||||
use proxmox_backup::server::{ApiConfig, rest::*};
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
|
||||
|
||||
fn main() {
|
||||
proxmox_backup::tools::setup_safe_path_env();
|
||||
|
||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
||||
eprintln!("Error: {}", err);
|
||||
std::process::exit(-1);
|
||||
@ -134,10 +136,10 @@ fn start_task_scheduler() {
|
||||
tokio::spawn(task.map(|_| ()));
|
||||
}
|
||||
|
||||
use std::time:: {Instant, Duration, SystemTime, UNIX_EPOCH};
|
||||
use std::time:: {Instant, Duration};
|
||||
|
||||
fn next_minute() -> Result<Instant, Error> {
|
||||
let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
|
||||
let epoch_now = epoch_now()?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
Ok(Instant::now() + epoch_next - epoch_now)
|
||||
}
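The scheduler sleeps until the next full minute; next_minute() rounds the current epoch time up to the next multiple of 60 seconds. A minimal sketch of the same arithmetic on plain integer seconds (hypothetical helper, not part of the diff):
// Hedged sketch: seconds to sleep until the next minute boundary,
// mirroring the rounding done by next_minute().
fn seconds_until_next_minute(epoch_now_secs: u64) -> u64 {
    (epoch_now_secs / 60 + 1) * 60 - epoch_now_secs
}
// e.g. at epoch 150 (30 s past a minute boundary) this returns 30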
|
||||
@ -296,8 +298,9 @@ async fn schedule_datastore_garbage_collection() {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
|
||||
Ok(epoch_now) => epoch_now.as_secs() as i64,
|
||||
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
@ -407,8 +410,8 @@ async fn schedule_datastore_prune() {
|
||||
}
|
||||
};
|
||||
|
||||
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
|
||||
Ok(epoch_now) => epoch_now.as_secs() as i64,
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
@ -532,8 +535,8 @@ async fn schedule_datastore_sync_jobs() {
|
||||
}
|
||||
};
|
||||
|
||||
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
|
||||
Ok(epoch_now) => epoch_now.as_secs() as i64,
|
||||
let now = match epoch_now_u64() {
|
||||
Ok(epoch_now) => epoch_now as i64,
|
||||
Err(err) => {
|
||||
eprintln!("query system time failed - {}", err);
|
||||
continue;
|
||||
@ -711,11 +714,11 @@ async fn generate_host_stats(save: bool) {
|
||||
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
|
||||
|
||||
match proxmox_backup::tools::disks::disk_usage(path) {
|
||||
Ok((total, used, _avail)) => {
|
||||
Ok(status) => {
|
||||
let rrd_key = format!("{}/total", rrd_prefix);
|
||||
rrd_update_gauge(&rrd_key, total as f64, save);
|
||||
rrd_update_gauge(&rrd_key, status.total as f64, save);
|
||||
let rrd_key = format!("{}/used", rrd_prefix);
|
||||
rrd_update_gauge(&rrd_key, used as f64, save);
|
||||
rrd_update_gauge(&rrd_key, status.used as f64, save);
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("read disk_usage on {:?} failed - {}", path, err);
|
||||
|
353 src/bin/proxmox_backup_manager/disk.rs (new file)
@ -0,0 +1,353 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
|
||||
|
||||
use proxmox_backup::tools::disks::{
|
||||
FileSystemType,
|
||||
SmartAttribute,
|
||||
complete_disk_name,
|
||||
};
|
||||
|
||||
use proxmox_backup::api2::node::disks::{
|
||||
zfs::DISK_LIST_SCHEMA,
|
||||
zfs::ZFS_ASHIFT_SCHEMA,
|
||||
zfs::ZfsRaidLevel,
|
||||
zfs::ZfsCompressionType,
|
||||
};
|
||||
|
||||
use proxmox_backup::api2::{self, types::* };
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Local disk list.
|
||||
fn list_disks(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_LIST_DISKS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let render_wearout = |value: &Value, _record: &Value| -> Result<String, Error> {
|
||||
match value.as_f64() {
|
||||
Some(value) => Ok(format!("{:.2} %", if value <= 100.0 { 100.0 - value } else { 0.0 })),
|
||||
None => Ok(String::from("-")),
|
||||
}
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("name"))
|
||||
.column(ColumnConfig::new("used"))
|
||||
.column(ColumnConfig::new("gpt"))
|
||||
.column(ColumnConfig::new("disk-type"))
|
||||
.column(ColumnConfig::new("size"))
|
||||
.column(ColumnConfig::new("model"))
|
||||
.column(ColumnConfig::new("wearout").renderer(render_wearout))
|
||||
.column(ColumnConfig::new("status"))
|
||||
;
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
description: "SMART attributes.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: SmartAttribute,
|
||||
},
|
||||
}
|
||||
)]
|
||||
/// Show SMART attributes.
|
||||
fn smart_attributes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_SMART_STATUS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut data = data["attributes"].take();
|
||||
|
||||
let options = default_table_format_options();
|
||||
format_and_print_result_full(&mut data, API_METHOD_SMART_ATTRIBUTES.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
uuid: {
|
||||
description: "UUID for the GPT table.",
|
||||
type: String,
|
||||
optional: true,
|
||||
max_length: 36,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Initialize empty Disk with GPT
|
||||
async fn initialize_disk(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_INITIALIZE_DISK;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
devices: {
|
||||
schema: DISK_LIST_SCHEMA,
|
||||
},
|
||||
raidlevel: {
|
||||
type: ZfsRaidLevel,
|
||||
},
|
||||
ashift: {
|
||||
schema: ZFS_ASHIFT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
compression: {
|
||||
type: ZfsCompressionType,
|
||||
optional: true,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the zpool.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// create a zfs pool
|
||||
async fn create_zpool(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::zfs::API_METHOD_CREATE_ZPOOL;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Local zfs pools.
|
||||
fn list_zpools(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::zfs::API_METHOD_LIST_ZPOOLS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let render_usage = |value: &Value, record: &Value| -> Result<String, Error> {
|
||||
let value = value.as_u64().unwrap_or(0);
|
||||
let size = match record["size"].as_u64() {
|
||||
Some(size) => size,
|
||||
None => bail!("missing size property"),
|
||||
};
|
||||
if size == 0 {
|
||||
bail!("got zero size");
|
||||
}
|
||||
Ok(format!("{:.2} %", (value as f64)/(size as f64)))
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("name"))
|
||||
.column(ColumnConfig::new("size"))
|
||||
.column(ColumnConfig::new("alloc").right_align(true).renderer(render_usage))
|
||||
.column(ColumnConfig::new("health"));
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
pub fn zpool_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_ZPOOLS))
|
||||
.insert("create",
|
||||
CliCommand::new(&API_METHOD_CREATE_ZPOOL)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("devices", complete_disk_name) // fixme: comlete the list
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List systemd datastore mount units.
|
||||
fn list_datastore_mounts(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::directory::API_METHOD_LIST_DATASTORE_MOUNTS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("path"))
|
||||
.column(ColumnConfig::new("device"))
|
||||
.column(ColumnConfig::new("filesystem"))
|
||||
.column(ColumnConfig::new("options"));
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the directory.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
filesystem: {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
|
||||
async fn create_datastore_disk(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::directory::API_METHOD_CREATE_DATASTORE_DISK;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
pub fn filesystem_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORE_MOUNTS))
|
||||
.insert("create",
|
||||
CliCommand::new(&API_METHOD_CREATE_DATASTORE_DISK)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
pub fn disk_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_DISKS))
|
||||
.insert("smart-attributes",
|
||||
CliCommand::new(&API_METHOD_SMART_ATTRIBUTES)
|
||||
.arg_param(&["disk"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
)
|
||||
.insert("fs", filesystem_commands())
|
||||
.insert("zpool", zpool_commands())
|
||||
.insert("initialize",
|
||||
CliCommand::new(&API_METHOD_INITIALIZE_DISK)
|
||||
.arg_param(&["disk"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
@ -14,3 +14,5 @@ mod sync;
|
||||
pub use sync::*;
|
||||
mod user;
|
||||
pub use user::*;
|
||||
mod disk;
|
||||
pub use disk::*;
|
||||
|
776 src/bin/pxar.rs
@ -1,191 +1,305 @@
|
||||
extern crate proxmox_backup;
|
||||
use std::collections::HashSet;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::OpenOptions;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::future::FutureExt;
|
||||
use futures::select;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
|
||||
use proxmox_backup::tools;
|
||||
|
||||
use serde_json::{Value};
|
||||
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::fs::OpenOptions;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::collections::HashSet;
|
||||
|
||||
use proxmox_backup::pxar;
|
||||
|
||||
fn dump_archive_from_reader<R: std::io::Read>(
|
||||
reader: &mut R,
|
||||
feature_flags: u64,
|
||||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
||||
|
||||
let stdout = std::io::stdout();
|
||||
let mut out = stdout.lock();
|
||||
|
||||
let mut path = PathBuf::new();
|
||||
decoder.dump_entry(&mut path, verbose, &mut out)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn dump_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let archive = tools::required_string_param(¶m, "archive")?;
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
|
||||
let feature_flags = pxar::flags::DEFAULT;
|
||||
|
||||
if archive == "-" {
|
||||
let stdin = std::io::stdin();
|
||||
let mut reader = stdin.lock();
|
||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
||||
} else {
|
||||
if verbose { println!("PXAR dump: {}", archive); }
|
||||
let file = std::fs::File::open(archive)?;
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags};
|
||||
|
||||
fn extract_archive_from_reader<R: std::io::Read>(
|
||||
reader: &mut R,
|
||||
target: &str,
|
||||
feature_flags: u64,
|
||||
feature_flags: Flags,
|
||||
allow_existing_dirs: bool,
|
||||
verbose: bool,
|
||||
pattern: Option<Vec<pxar::MatchPattern>>
|
||||
match_list: &[MatchEntry],
|
||||
) -> Result<(), Error> {
|
||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
||||
decoder.set_callback(move |path| {
|
||||
proxmox_backup::pxar::extract_archive(
|
||||
pxar::decoder::Decoder::from_std(reader)?,
|
||||
Path::new(target),
|
||||
&match_list,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
|path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
decoder.set_allow_existing_dirs(allow_existing_dirs);
|
||||
|
||||
let pattern = pattern.unwrap_or_else(Vec::new);
|
||||
decoder.restore(Path::new(target), &pattern)?;
|
||||
|
||||
Ok(())
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
pattern: {
|
||||
description: "List of paths or pattern matching files to restore",
|
||||
type: Array,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path or pattern matching files to restore.",
|
||||
},
|
||||
optional: true,
|
||||
},
|
||||
target: {
|
||||
description: "Target directory",
|
||||
optional: true,
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-xattrs": {
|
||||
description: "Ignore extended file attributes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fcaps": {
|
||||
description: "Ignore file capabilities.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-acls": {
|
||||
description: "Ignore access control list entries.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"allow-existing-dirs": {
|
||||
description: "Allows directories to already exist on restore.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"files-from": {
|
||||
description: "File containing match pattern for files to restore.",
|
||||
optional: true,
|
||||
},
|
||||
"no-device-nodes": {
|
||||
description: "Ignore device nodes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fifos": {
|
||||
description: "Ignore fifos.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-sockets": {
|
||||
description: "Ignore sockets.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Extract an archive.
|
||||
fn extract_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let archive = tools::required_string_param(¶m, "archive")?;
|
||||
let target = param["target"].as_str().unwrap_or(".");
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
||||
let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
|
||||
let files_from = param["files-from"].as_str();
|
||||
let empty = Vec::new();
|
||||
let arg_pattern = param["pattern"].as_array().unwrap_or(&empty);
|
||||
|
||||
let mut feature_flags = pxar::flags::DEFAULT;
|
||||
archive: String,
|
||||
pattern: Option<Vec<String>>,
|
||||
target: Option<String>,
|
||||
verbose: bool,
|
||||
no_xattrs: bool,
|
||||
no_fcaps: bool,
|
||||
no_acls: bool,
|
||||
allow_existing_dirs: bool,
|
||||
files_from: Option<String>,
|
||||
no_device_nodes: bool,
|
||||
no_fifos: bool,
|
||||
no_sockets: bool,
|
||||
) -> Result<(), Error> {
|
||||
let mut feature_flags = Flags::DEFAULT;
|
||||
if no_xattrs {
|
||||
feature_flags ^= pxar::flags::WITH_XATTRS;
|
||||
feature_flags ^= Flags::WITH_XATTRS;
|
||||
}
|
||||
if no_fcaps {
|
||||
feature_flags ^= pxar::flags::WITH_FCAPS;
|
||||
feature_flags ^= Flags::WITH_FCAPS;
|
||||
}
|
||||
if no_acls {
|
||||
feature_flags ^= pxar::flags::WITH_ACL;
|
||||
feature_flags ^= Flags::WITH_ACL;
|
||||
}
|
||||
if no_device_nodes {
|
||||
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
|
||||
feature_flags ^= Flags::WITH_DEVICE_NODES;
|
||||
}
|
||||
if no_fifos {
|
||||
feature_flags ^= pxar::flags::WITH_FIFOS;
|
||||
feature_flags ^= Flags::WITH_FIFOS;
|
||||
}
|
||||
if no_sockets {
|
||||
feature_flags ^= pxar::flags::WITH_SOCKETS;
|
||||
feature_flags ^= Flags::WITH_SOCKETS;
|
||||
}
|
||||
|
||||
let mut pattern_list = Vec::new();
|
||||
if let Some(filename) = files_from {
|
||||
let dir = nix::dir::Dir::open("./", nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty())?;
|
||||
if let Some((mut pattern, _, _)) = pxar::MatchPattern::from_file(dir.as_raw_fd(), filename)? {
|
||||
pattern_list.append(&mut pattern);
|
||||
let pattern = pattern.unwrap_or_else(Vec::new);
|
||||
let target = target.as_ref().map_or_else(|| ".", String::as_str);
|
||||
|
||||
let mut match_list = Vec::new();
|
||||
if let Some(filename) = &files_from {
|
||||
for line in proxmox_backup::tools::file_get_non_comment_lines(filename)? {
|
||||
let line = line
|
||||
.map_err(|err| format_err!("error reading {}: {}", filename, err))?;
|
||||
match_list.push(
|
||||
MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Include)
|
||||
.map_err(|err| format_err!("bad pattern in file '{}': {}", filename, err))?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
for s in arg_pattern {
|
||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
||||
pattern_list.push(p);
|
||||
for entry in pattern {
|
||||
match_list.push(
|
||||
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include)
|
||||
.map_err(|err| format_err!("error in pattern: {}", err))?,
|
||||
);
|
||||
}
|
||||
|
||||
let pattern = if pattern_list.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(pattern_list)
|
||||
};
|
||||
|
||||
if archive == "-" {
|
||||
let stdin = std::io::stdin();
|
||||
let mut reader = stdin.lock();
|
||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
||||
extract_archive_from_reader(
|
||||
&mut reader,
|
||||
&target,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
verbose,
|
||||
&match_list,
|
||||
)?;
|
||||
} else {
|
||||
if verbose { println!("PXAR extract: {}", archive); }
|
||||
if verbose {
|
||||
println!("PXAR extract: {}", archive);
|
||||
}
|
||||
let file = std::fs::File::open(archive)?;
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
||||
extract_archive_from_reader(
|
||||
&mut reader,
|
||||
&target,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
verbose,
|
||||
&match_list,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
source: {
|
||||
description: "Source directory.",
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-xattrs": {
|
||||
description: "Ignore extended file attributes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fcaps": {
|
||||
description: "Ignore file capabilities.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-acls": {
|
||||
description: "Ignore access control list entries.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"all-file-systems": {
|
||||
description: "Include mounted sudirs.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-device-nodes": {
|
||||
description: "Ignore device nodes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fifos": {
|
||||
description: "Ignore fifos.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-sockets": {
|
||||
description: "Ignore sockets.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
exclude: {
|
||||
description: "List of paths or pattern matching files to exclude.",
|
||||
optional: true,
|
||||
type: Array,
|
||||
items: {
|
||||
description: "Path or pattern matching files to restore",
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
"entries-max": {
|
||||
description: "Max number of entries loaded at once into memory",
|
||||
optional: true,
|
||||
default: ENCODER_MAX_ENTRIES as isize,
|
||||
minimum: 0,
|
||||
maximum: std::isize::MAX,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Create a new .pxar archive.
|
||||
fn create_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
archive: String,
|
||||
source: String,
|
||||
verbose: bool,
|
||||
no_xattrs: bool,
|
||||
no_fcaps: bool,
|
||||
no_acls: bool,
|
||||
all_file_systems: bool,
|
||||
no_device_nodes: bool,
|
||||
no_fifos: bool,
|
||||
no_sockets: bool,
|
||||
exclude: Option<Vec<String>>,
|
||||
entries_max: isize,
|
||||
) -> Result<(), Error> {
|
||||
let pattern_list = {
|
||||
let input = exclude.unwrap_or_else(Vec::new);
|
||||
let mut pattern_list = Vec::with_capacity(input.len());
|
||||
for entry in input {
|
||||
pattern_list.push(
|
||||
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
|
||||
.map_err(|err| format_err!("error in exclude pattern: {}", err))?,
|
||||
);
|
||||
}
|
||||
pattern_list
|
||||
};
|
||||
|
||||
let archive = tools::required_string_param(¶m, "archive")?;
|
||||
let source = tools::required_string_param(¶m, "source")?;
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
|
||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
||||
let empty = Vec::new();
|
||||
let exclude_pattern = param["exclude"].as_array().unwrap_or(&empty);
|
||||
let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
|
||||
|
||||
let devices = if all_file_systems { None } else { Some(HashSet::new()) };
|
||||
let device_set = if all_file_systems {
|
||||
None
|
||||
} else {
|
||||
Some(HashSet::new())
|
||||
};
|
||||
|
||||
let source = PathBuf::from(source);
|
||||
|
||||
let mut dir = nix::dir::Dir::open(
|
||||
&source, nix::fcntl::OFlag::O_NOFOLLOW, nix::sys::stat::Mode::empty())?;
|
||||
let dir = nix::dir::Dir::open(
|
||||
&source,
|
||||
nix::fcntl::OFlag::O_NOFOLLOW,
|
||||
nix::sys::stat::Mode::empty(),
|
||||
)?;
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.create_new(true)
|
||||
@ -193,332 +307,150 @@ fn create_archive(
|
||||
.mode(0o640)
|
||||
.open(archive)?;
|
||||
|
||||
let mut writer = std::io::BufWriter::with_capacity(1024*1024, file);
|
||||
let mut feature_flags = pxar::flags::DEFAULT;
|
||||
let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
|
||||
let mut feature_flags = Flags::DEFAULT;
|
||||
if no_xattrs {
|
||||
feature_flags ^= pxar::flags::WITH_XATTRS;
|
||||
feature_flags ^= Flags::WITH_XATTRS;
|
||||
}
|
||||
if no_fcaps {
|
||||
feature_flags ^= pxar::flags::WITH_FCAPS;
|
||||
feature_flags ^= Flags::WITH_FCAPS;
|
||||
}
|
||||
if no_acls {
|
||||
feature_flags ^= pxar::flags::WITH_ACL;
|
||||
feature_flags ^= Flags::WITH_ACL;
|
||||
}
|
||||
if no_device_nodes {
|
||||
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
|
||||
feature_flags ^= Flags::WITH_DEVICE_NODES;
|
||||
}
|
||||
if no_fifos {
|
||||
feature_flags ^= pxar::flags::WITH_FIFOS;
|
||||
feature_flags ^= Flags::WITH_FIFOS;
|
||||
}
|
||||
if no_sockets {
|
||||
feature_flags ^= pxar::flags::WITH_SOCKETS;
|
||||
feature_flags ^= Flags::WITH_SOCKETS;
|
||||
}
|
||||
|
||||
let mut pattern_list = Vec::new();
|
||||
for s in exclude_pattern {
|
||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
||||
pattern_list.push(p);
|
||||
}
|
||||
|
||||
let catalog = None::<&mut pxar::catalog::DummyCatalogWriter>;
|
||||
pxar::Encoder::encode(
|
||||
source,
|
||||
&mut dir,
|
||||
&mut writer,
|
||||
catalog,
|
||||
devices,
|
||||
verbose,
|
||||
false,
|
||||
feature_flags,
|
||||
let writer = pxar::encoder::sync::StandardWriter::new(writer);
|
||||
proxmox_backup::pxar::create_archive(
|
||||
dir,
|
||||
writer,
|
||||
pattern_list,
|
||||
feature_flags,
|
||||
device_set,
|
||||
false,
|
||||
|path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
entries_max as usize,
|
||||
None,
|
||||
)?;
|
||||
|
||||
writer.flush()?;
|
||||
|
||||
Ok(Value::Null)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: { description: "Archive name." },
|
||||
mountpoint: { description: "Mountpoint for the file system." },
|
||||
verbose: {
|
||||
description: "Verbose output, running in the foreground (for debugging).",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Mount the archive to the provided mountpoint via FUSE.
|
||||
fn mount_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let archive = tools::required_string_param(¶m, "archive")?;
|
||||
let mountpoint = tools::required_string_param(¶m, "mountpoint")?;
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
let no_mt = param["no-mt"].as_bool().unwrap_or(false);
|
||||
|
||||
let archive = Path::new(archive);
|
||||
let mountpoint = Path::new(mountpoint);
|
||||
async fn mount_archive(
|
||||
archive: String,
|
||||
mountpoint: String,
|
||||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
let archive = Path::new(&archive);
|
||||
let mountpoint = Path::new(&mountpoint);
|
||||
let options = OsStr::new("ro,default_permissions");
|
||||
let mut session = pxar::fuse::Session::from_path(&archive, &options, verbose)
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
// Mount the session and daemonize if verbose is not set
|
||||
session.mount(&mountpoint, !verbose)?;
|
||||
session.run_loop(!no_mt)?;
|
||||
|
||||
Ok(Value::Null)
|
||||
let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
|
||||
.await
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
let mut interrupt = signal(SignalKind::interrupt())?;
|
||||
|
||||
select! {
|
||||
res = session.fuse() => res?,
|
||||
_ = interrupt.recv().fuse() => {
|
||||
if verbose {
|
||||
eprintln!("interrupted");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_CREATE_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&create_archive),
|
||||
&ObjectSchema::new(
|
||||
"Create new .pxar archive.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name").schema()
|
||||
),
|
||||
(
|
||||
"source",
|
||||
false,
|
||||
&StringSchema::new("Source directory.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-xattrs",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore extended file attributes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fcaps",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore file capabilities.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-acls",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore access control list entries.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"all-file-systems",
|
||||
true,
|
||||
&BooleanSchema::new("Include mounted sudirs.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-device-nodes",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore device nodes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fifos",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore fifos.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-sockets",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore sockets.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"exclude",
|
||||
true,
|
||||
&ArraySchema::new(
|
||||
"List of paths or pattern matching files to exclude.",
|
||||
&StringSchema::new("Path or pattern matching files to restore.").schema()
|
||||
).schema()
|
||||
),
|
||||
(
|
||||
"entries-max",
|
||||
true,
|
||||
&IntegerSchema::new("Max number of entries loaded at once into memory")
|
||||
.default(pxar::ENCODER_MAX_ENTRIES as isize)
|
||||
.minimum(0)
|
||||
.maximum(std::isize::MAX)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// List the contents of an archive.
|
||||
fn dump_archive(archive: String, verbose: bool) -> Result<(), Error> {
|
||||
for entry in pxar::decoder::Decoder::open(archive)? {
|
||||
let entry = entry?;
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&extract_archive),
|
||||
&ObjectSchema::new(
|
||||
"Extract an archive.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name.").schema()
|
||||
),
|
||||
(
|
||||
"pattern",
|
||||
true,
|
||||
&ArraySchema::new(
|
||||
"List of paths or pattern matching files to restore",
|
||||
&StringSchema::new("Path or pattern matching files to restore.").schema()
|
||||
).schema()
|
||||
),
|
||||
(
|
||||
"target",
|
||||
true,
|
||||
&StringSchema::new("Target directory.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-xattrs",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore extended file attributes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fcaps",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore file capabilities.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-acls",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore access control list entries.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"allow-existing-dirs",
|
||||
true,
|
||||
&BooleanSchema::new("Allows directories to already exist on restore.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"files-from",
|
||||
true,
|
||||
&StringSchema::new("Match pattern for files to restore.").schema()
|
||||
),
|
||||
(
|
||||
"no-device-nodes",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore device nodes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fifos",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore fifos.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-sockets",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore sockets.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_MOUNT_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&mount_archive),
|
||||
&ObjectSchema::new(
|
||||
"Mount the archive as filesystem via FUSE.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name.").schema()
|
||||
),
|
||||
(
|
||||
"mountpoint",
|
||||
false,
|
||||
&StringSchema::new("Mountpoint for the filesystem root.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output, keeps process running in foreground (for debugging).")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-mt",
|
||||
true,
|
||||
&BooleanSchema::new("Run in single threaded mode (for debugging).")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_DUMP_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&dump_archive),
|
||||
&ObjectSchema::new(
|
||||
"List the contents of an archive.",
|
||||
&sorted!([
|
||||
( "archive", false, &StringSchema::new("Archive name.").schema()),
|
||||
( "verbose", true, &BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
])
|
||||
)
|
||||
);
|
||||
if verbose {
|
||||
println!("{}", format_single_line_entry(&entry));
|
||||
} else {
|
||||
println!("{:?}", entry.path());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.insert(
|
||||
"create",
|
||||
CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.arg_param(&["archive", "source"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name),
|
||||
)
|
||||
.insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.insert(
|
||||
"extract",
|
||||
CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.arg_param(&["archive", "target"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name),
|
||||
)
|
||||
.insert("mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.insert(
|
||||
"mount",
|
||||
CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.arg_param(&["archive", "mountpoint"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name),
|
||||
)
|
||||
.insert("list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.insert(
|
||||
"list",
|
||||
CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.arg_param(&["archive"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("archive", tools::complete_file_name),
|
||||
);
|
||||
|
||||
let rpcenv = CliEnvironment::new();
|
||||
run_cli_command(cmd_def, rpcenv, None);
|
||||
run_cli_command(cmd_def, rpcenv, Some(|future| {
|
||||
proxmox_backup::tools::runtime::main(future)
|
||||
}));
|
||||
}
|
||||
|
@ -3,8 +3,8 @@
|
||||
//! This library implements the client side to access the backups
|
||||
//! server using https.
|
||||
|
||||
pub mod pipe_to_stream;
|
||||
mod merge_known_chunks;
|
||||
pub mod pipe_to_stream;
|
||||
|
||||
mod http_client;
|
||||
pub use http_client::*;
|
||||
@ -24,9 +24,6 @@ pub use remote_chunk_reader::*;
|
||||
mod pxar_backup_stream;
|
||||
pub use pxar_backup_stream::*;
|
||||
|
||||
mod pxar_decode_writer;
|
||||
pub use pxar_decode_writer::*;
|
||||
|
||||
mod backup_repo;
|
||||
pub use backup_repo::*;
|
||||
|
||||
|
@ -91,7 +91,7 @@ impl BackupReader {
|
||||
&self,
|
||||
file_name: &str,
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let path = "download";
|
||||
let param = json!({ "file-name": file_name });
|
||||
self.h2.download(path, Some(param), output).await
|
||||
@ -103,7 +103,7 @@ impl BackupReader {
|
||||
pub async fn speedtest<W: Write + Send>(
|
||||
&self,
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
self.h2.download("speedtest", None, output).await
|
||||
}
|
||||
|
||||
@ -112,7 +112,7 @@ impl BackupReader {
|
||||
&self,
|
||||
digest: &[u8; 32],
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let path = "chunk";
|
||||
let param = json!({ "digest": digest_to_hex(digest) });
|
||||
self.h2.download(path, Some(param), output).await
|
||||
@ -127,7 +127,8 @@ impl BackupReader {
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let raw_data = self.download(MANIFEST_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
|
||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
blob.verify_crc()?;
|
||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
@ -146,13 +147,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<DataBlobReader<File>, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let mut tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
||||
manifest.verify_file(name, &csum, size)?;
|
||||
@ -172,13 +173,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<DynamicIndexReader, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
|
||||
@ -200,13 +201,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<FixedIndexReader, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
|
||||
|
@ -1,4 +1,5 @@
|
||||
use std::collections::HashSet;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
@ -22,6 +23,7 @@ pub struct BackupWriter {
|
||||
h2: H2Client,
|
||||
abort: AbortHandle,
|
||||
verbose: bool,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
}
|
||||
|
||||
impl Drop for BackupWriter {
|
||||
@ -38,12 +40,13 @@ pub struct BackupStats {
|
||||
|
||||
impl BackupWriter {
|
||||
|
||||
fn new(h2: H2Client, abort: AbortHandle, verbose: bool) -> Arc<Self> {
|
||||
Arc::new(Self { h2, abort, verbose })
|
||||
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
|
||||
Arc::new(Self { h2, abort, crypt_config, verbose })
|
||||
}
|
||||
|
||||
pub async fn start(
|
||||
client: HttpClient,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
@ -64,7 +67,7 @@ impl BackupWriter {
|
||||
|
||||
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
|
||||
|
||||
Ok(BackupWriter::new(h2, abort, debug))
|
||||
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
@ -159,16 +162,19 @@ impl BackupWriter {
|
||||
&self,
|
||||
data: Vec<u8>,
|
||||
file_name: &str,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
sign_only: bool,
|
||||
crypt_or_sign: Option<bool>,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
let blob = if let Some(ref crypt_config) = crypt_config {
|
||||
if sign_only {
|
||||
DataBlob::create_signed(&data, crypt_config, compress)?
|
||||
} else {
|
||||
let blob = if let Some(ref crypt_config) = self.crypt_config {
|
||||
if let Some(encrypt) = crypt_or_sign {
|
||||
if encrypt {
|
||||
DataBlob::encode(&data, Some(crypt_config), compress)?
|
||||
} else {
|
||||
DataBlob::create_signed(&data, crypt_config, compress)?
|
||||
}
|
||||
} else {
|
||||
DataBlob::encode(&data, None, compress)?
|
||||
}
|
||||
} else {
|
||||
DataBlob::encode(&data, None, compress)?
|
||||
@ -187,8 +193,8 @@ impl BackupWriter {
|
||||
&self,
|
||||
src_path: P,
|
||||
file_name: &str,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
crypt_or_sign: Option<bool>,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
let src_path = src_path.as_ref();
|
||||
@ -203,25 +209,16 @@ impl BackupWriter {
|
||||
.await
|
||||
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
|
||||
|
||||
let blob = DataBlob::encode(&contents, crypt_config.as_ref().map(AsRef::as_ref), compress)?;
|
||||
let raw_data = blob.into_inner();
|
||||
let size = raw_data.len() as u64;
|
||||
let csum = openssl::sha::sha256(&raw_data);
|
||||
let param = json!({
|
||||
"encoded-size": size,
|
||||
"file-name": file_name,
|
||||
});
|
||||
self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
|
||||
Ok(BackupStats { size, csum })
|
||||
self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
|
||||
}
|
||||
|
||||
pub async fn upload_stream(
|
||||
&self,
|
||||
previous_manifest: Option<Arc<BackupManifest>>,
|
||||
archive_name: &str,
|
||||
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
||||
prefix: &str,
|
||||
fixed_size: Option<u64>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
) -> Result<BackupStats, Error> {
|
||||
let known_chunks = Arc::new(Mutex::new(HashSet::new()));
|
||||
|
||||
@ -233,7 +230,18 @@ impl BackupWriter {
|
||||
let index_path = format!("{}_index", prefix);
|
||||
let close_path = format!("{}_close", prefix);
|
||||
|
||||
self.download_chunk_list(&index_path, archive_name, known_chunks.clone()).await?;
|
||||
if let Some(manifest) = previous_manifest {
|
||||
// try, but ignore errors
|
||||
match archive_type(archive_name) {
|
||||
Ok(ArchiveType::FixedIndex) => {
|
||||
let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||
}
|
||||
Ok(ArchiveType::DynamicIndex) => {
|
||||
let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||
}
|
||||
_ => { /* do nothing */ }
|
||||
}
|
||||
}
|
||||
|
||||
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
|
||||
|
||||
@ -244,7 +252,7 @@ impl BackupWriter {
|
||||
stream,
|
||||
&prefix,
|
||||
known_chunks.clone(),
|
||||
crypt_config,
|
||||
self.crypt_config.clone(),
|
||||
self.verbose,
|
||||
)
|
||||
.await?;
|
||||
@ -374,41 +382,93 @@ impl BackupWriter {
|
||||
(verify_queue_tx, verify_result_rx)
|
||||
}
|
||||
|
||||
pub async fn download_chunk_list(
|
||||
pub async fn download_previous_fixed_index(
|
||||
&self,
|
||||
path: &str,
|
||||
archive_name: &str,
|
||||
manifest: &BackupManifest,
|
||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
) -> Result<FixedIndexReader, Error> {
|
||||
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let param = json!({ "archive-name": archive_name });
|
||||
let request = H2Client::request_builder("localhost", "GET", path, Some(param), None).unwrap();
|
||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
||||
|
||||
let h2request = self.h2.send_request(request, None).await?;
|
||||
let resp = h2request.await?;
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
|
||||
// Note: do not use values stored in the index (not trusted) - instead, compute them again
|
||||
let (csum, size) = index.compute_csum();
|
||||
manifest.verify_file(archive_name, &csum, size)?;
|
||||
|
||||
let status = resp.status();
|
||||
|
||||
if !status.is_success() {
|
||||
H2Client::h2api_response(resp).await?; // raise error
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
let mut body = resp.into_body();
|
||||
let mut flow_control = body.flow_control().clone();
|
||||
|
||||
let mut stream = DigestListDecoder::new(body.map_err(Error::from));
|
||||
|
||||
while let Some(chunk) = stream.try_next().await? {
|
||||
let _ = flow_control.release_capacity(chunk.len());
|
||||
known_chunks.lock().unwrap().insert(chunk);
|
||||
// add index chunks to known chunks
|
||||
let mut known_chunks = known_chunks.lock().unwrap();
|
||||
for i in 0..index.index_count() {
|
||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||
}
|
||||
|
||||
if self.verbose {
|
||||
println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
|
||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
pub async fn download_previous_dynamic_index(
|
||||
&self,
|
||||
archive_name: &str,
|
||||
manifest: &BackupManifest,
|
||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<DynamicIndexReader, Error> {
|
||||
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let param = json!({ "archive-name": archive_name });
|
||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
||||
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
|
||||
// Note: do not use values stored in the index (not trusted) - instead, compute them again
|
||||
let (csum, size) = index.compute_csum();
|
||||
manifest.verify_file(archive_name, &csum, size)?;
|
||||
|
||||
// add index chunks to known chunks
|
||||
let mut known_chunks = known_chunks.lock().unwrap();
|
||||
for i in 0..index.index_count() {
|
||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||
}
|
||||
|
||||
if self.verbose {
|
||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
/// Download backup manifest (index.json) of last backup
|
||||
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||
|
||||
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
||||
self.h2.download("previous", Some(param), &mut raw_data).await?;
|
||||
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
blob.verify_crc()?;
|
||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
let json: Value = serde_json::from_slice(&data[..])?;
|
||||
let manifest = BackupManifest::try_from(json)?;
|
||||
|
||||
Ok(manifest)
|
||||
}
|
||||
|
||||
fn upload_chunk_info_stream(
|
||||
|
@ -707,7 +707,7 @@ impl H2Client {
|
||||
path: &str,
|
||||
param: Option<Value>,
|
||||
mut output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();
|
||||
|
||||
let response_future = self.send_request(request, None).await?;
|
||||
@ -727,7 +727,7 @@ impl H2Client {
|
||||
output.write_all(&chunk)?;
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn upload(
|
||||
|
@ -34,7 +34,7 @@ async fn pull_index_chunks<I: IndexFile>(
|
||||
continue;
|
||||
}
|
||||
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
let chunk = chunk_reader.read_raw_chunk(&digest)?;
|
||||
let chunk = chunk_reader.read_raw_chunk(&digest).await?;
|
||||
|
||||
target.insert_chunk(&chunk, &digest)?;
|
||||
}
|
||||
@ -47,13 +47,13 @@ async fn download_manifest(
|
||||
filename: &std::path::Path,
|
||||
) -> Result<std::fs::File, Error> {
|
||||
|
||||
let tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
let mut tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.read(true)
|
||||
.open(&filename)?;
|
||||
|
||||
let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
|
||||
reader.download(MANIFEST_BLOB_NAME, &mut tmp_manifest_file).await?;
|
||||
|
||||
tmp_manifest_file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
@ -77,13 +77,13 @@ async fn pull_single_archive(
|
||||
tmp_path.set_extension("tmp");
|
||||
|
||||
worker.log(format!("sync archive {}", archive_name));
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.read(true)
|
||||
.open(&tmp_path)?;
|
||||
|
||||
let tmpfile = reader.download(archive_name, tmpfile).await?;
|
||||
reader.download(archive_name, &mut tmpfile).await?;
|
||||
|
||||
match archive_type(archive_name)? {
|
||||
ArchiveType::DynamicIndex => {
|
||||
@ -124,7 +124,7 @@ async fn try_client_log_download(
|
||||
.open(&tmp_path)?;
|
||||
|
||||
// Note: be silent if there is no log - only log successful download
|
||||
if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
|
||||
if let Ok(()) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
|
||||
if let Err(err) = std::fs::rename(&tmp_path, &path) {
|
||||
bail!("Atomic rename file {:?} failed - {}", path, err);
|
||||
}
|
||||
|
@ -9,12 +9,12 @@ use std::thread;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::stream::Stream;
|
||||
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
use nix::dir::Dir;
|
||||
|
||||
use crate::pxar;
|
||||
use pathpatterns::MatchEntry;
|
||||
|
||||
use crate::backup::CatalogWriter;
|
||||
|
||||
/// Stream implementation to encode and upload .pxar archives.
|
||||
@ -29,7 +29,6 @@ pub struct PxarBackupStream {
|
||||
}
|
||||
|
||||
impl Drop for PxarBackupStream {
|
||||
|
||||
fn drop(&mut self) {
|
||||
self.rx = None;
|
||||
self.child.take().unwrap().join().unwrap();
|
||||
@ -37,45 +36,48 @@ impl Drop for PxarBackupStream {
|
||||
}
|
||||
|
||||
impl PxarBackupStream {
|
||||
|
||||
pub fn new<W: Write + Send + 'static>(
|
||||
mut dir: Dir,
|
||||
path: PathBuf,
|
||||
dir: Dir,
|
||||
_path: PathBuf,
|
||||
device_set: Option<HashSet<u64>>,
|
||||
verbose: bool,
|
||||
_verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
catalog: Arc<Mutex<CatalogWriter<W>>>,
|
||||
exclude_pattern: Vec<pxar::MatchPattern>,
|
||||
patterns: Vec<MatchEntry>,
|
||||
entries_max: usize,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::sync_channel(10);
|
||||
|
||||
let buffer_size = 256*1024;
|
||||
let buffer_size = 256 * 1024;
|
||||
|
||||
let error = Arc::new(Mutex::new(None));
|
||||
let error2 = error.clone();
|
||||
let child = std::thread::Builder::new()
|
||||
.name("PxarBackupStream".to_string())
|
||||
.spawn({
|
||||
let error = Arc::clone(&error);
|
||||
move || {
|
||||
let mut catalog_guard = catalog.lock().unwrap();
|
||||
let writer = std::io::BufWriter::with_capacity(
|
||||
buffer_size,
|
||||
crate::tools::StdChannelWriter::new(tx),
|
||||
);
|
||||
|
||||
let catalog = catalog.clone();
|
||||
let child = std::thread::Builder::new().name("PxarBackupStream".to_string()).spawn(move || {
|
||||
let mut guard = catalog.lock().unwrap();
|
||||
let mut writer = std::io::BufWriter::with_capacity(buffer_size, crate::tools::StdChannelWriter::new(tx));
|
||||
|
||||
if let Err(err) = pxar::Encoder::encode(
|
||||
path,
|
||||
&mut dir,
|
||||
&mut writer,
|
||||
Some(&mut *guard),
|
||||
let writer = pxar::encoder::sync::StandardWriter::new(writer);
|
||||
if let Err(err) = crate::pxar::create_archive(
|
||||
dir,
|
||||
writer,
|
||||
patterns,
|
||||
crate::pxar::Flags::DEFAULT,
|
||||
device_set,
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
pxar::flags::DEFAULT,
|
||||
exclude_pattern,
|
||||
|_| Ok(()),
|
||||
entries_max,
|
||||
Some(&mut *catalog_guard),
|
||||
) {
|
||||
let mut error = error2.lock().unwrap();
|
||||
let mut error = error.lock().unwrap();
|
||||
*error = Some(err.to_string());
|
||||
}
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
@ -91,23 +93,31 @@ impl PxarBackupStream {
|
||||
verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
catalog: Arc<Mutex<CatalogWriter<W>>>,
|
||||
exclude_pattern: Vec<pxar::MatchPattern>,
|
||||
patterns: Vec<MatchEntry>,
|
||||
entries_max: usize,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
|
||||
let path = std::path::PathBuf::from(dirname);
|
||||
|
||||
Self::new(dir, path, device_set, verbose, skip_lost_and_found, catalog, exclude_pattern, entries_max)
|
||||
Self::new(
|
||||
dir,
|
||||
path,
|
||||
device_set,
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
catalog,
|
||||
patterns,
|
||||
entries_max,
|
||||
)
|
||||
}
|
||||
}
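The constructor above spawns a dedicated thread that encodes into a channel-backed writer and stashes any failure in a shared slot; a minimal sketch of that producer pattern, with illustrative names:

use std::sync::mpsc::{sync_channel, Receiver};
use std::sync::{Arc, Mutex};

fn spawn_producer() -> (Receiver<Vec<u8>>, Arc<Mutex<Option<String>>>) {
    // bounded channel: the producer blocks once the consumer falls behind
    let (tx, rx) = sync_channel(10);
    let error = Arc::new(Mutex::new(None));

    std::thread::Builder::new()
        .name("producer".to_string())
        .spawn({
            let error = Arc::clone(&error);
            move || {
                for i in 0..3u8 {
                    if tx.send(vec![i; 4]).is_err() {
                        // receiver gone; record the error like the stream does
                        *error.lock().unwrap() = Some("channel closed".to_string());
                        return;
                    }
                }
            }
        })
        .expect("failed to spawn producer thread");

    (rx, error)
}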
|
||||
|
||||
impl Stream for PxarBackupStream {
|
||||
|
||||
type Item = Result<Vec<u8>, Error>;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
{ // limit lock scope
|
||||
{
|
||||
// limit lock scope
|
||||
let error = self.error.lock().unwrap();
|
||||
if let Some(ref msg) = *error {
|
||||
return Poll::Ready(Some(Err(format_err!("{}", msg))));
|
||||
|
@ -1,70 +0,0 @@
|
||||
use anyhow::{Error};
|
||||
|
||||
use std::thread;
|
||||
use std::os::unix::io::FromRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::io::Write;
|
||||
|
||||
use crate::pxar;
|
||||
|
||||
/// Writer implementation to decode a .pxar archive (download).
|
||||
|
||||
pub struct PxarDecodeWriter {
|
||||
pipe: Option<std::fs::File>,
|
||||
child: Option<thread::JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Drop for PxarDecodeWriter {
|
||||
|
||||
fn drop(&mut self) {
|
||||
drop(self.pipe.take());
|
||||
self.child.take().unwrap().join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl PxarDecodeWriter {
|
||||
|
||||
pub fn new(base: &Path, verbose: bool) -> Result<Self, Error> {
|
||||
let (rx, tx) = nix::unistd::pipe()?;
|
||||
|
||||
let base = PathBuf::from(base);
|
||||
|
||||
let child = thread::spawn(move|| {
|
||||
let mut reader = unsafe { std::fs::File::from_raw_fd(rx) };
|
||||
let mut decoder = pxar::SequentialDecoder::new(&mut reader, pxar::flags::DEFAULT);
|
||||
decoder.set_callback(move |path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
|
||||
if let Err(err) = decoder.restore(&base, &Vec::new()) {
|
||||
eprintln!("pxar decode failed - {}", err);
|
||||
}
|
||||
});
|
||||
|
||||
let pipe = unsafe { std::fs::File::from_raw_fd(tx) };
|
||||
|
||||
Ok(Self { pipe: Some(pipe), child: Some(child) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for PxarDecodeWriter {
|
||||
|
||||
fn write(&mut self, buffer: &[u8]) -> Result<usize, std::io::Error> {
|
||||
let pipe = match self.pipe {
|
||||
Some(ref mut pipe) => pipe,
|
||||
None => unreachable!(),
|
||||
};
|
||||
pipe.write(buffer)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
let pipe = match self.pipe {
|
||||
Some(ref mut pipe) => pipe,
|
||||
None => unreachable!(),
|
||||
};
|
||||
pipe.flush()
|
||||
}
|
||||
}
|
@ -1,10 +1,12 @@
|
||||
use std::future::Future;
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Error};
|
||||
use anyhow::Error;
|
||||
|
||||
use super::BackupReader;
|
||||
use crate::backup::{ReadChunk, DataBlob, CryptConfig};
|
||||
use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
|
||||
use crate::tools::runtime::block_on;
|
||||
|
||||
/// Read chunks from remote host using ``BackupReader``
|
||||
@ -16,7 +18,6 @@ pub struct RemoteChunkReader {
|
||||
}
|
||||
|
||||
impl RemoteChunkReader {
|
||||
|
||||
/// Create a new instance.
|
||||
///
|
||||
/// Chunks listed in ``cache_hint`` are cached and kept in RAM.
|
||||
@ -25,39 +26,39 @@ impl RemoteChunkReader {
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
cache_hint: HashMap<[u8; 32], usize>,
|
||||
) -> Self {
|
||||
|
||||
Self { client, crypt_config, cache_hint, cache: HashMap::new() }
|
||||
Self {
|
||||
client,
|
||||
crypt_config,
|
||||
cache_hint,
|
||||
cache: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadChunk for RemoteChunkReader {
|
||||
pub async fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
|
||||
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
|
||||
|
||||
let mut chunk_data = Vec::with_capacity(4*1024*1024);
|
||||
|
||||
//tokio::task::block_in_place(|| futures::executor::block_on(self.client.download_chunk(&digest, &mut chunk_data)))?;
|
||||
block_on(async {
|
||||
// download_chunk returns the writer back to us, but we need to return a 'static value
|
||||
self.client
|
||||
.download_chunk(&digest, &mut chunk_data)
|
||||
.await
|
||||
.map(drop)
|
||||
})?;
|
||||
.await?;
|
||||
|
||||
let chunk = DataBlob::from_raw(chunk_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
Ok(chunk)
|
||||
}
|
||||
}
|
||||
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
impl ReadChunk for RemoteChunkReader {
|
||||
fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
block_on(Self::read_raw_chunk(self, digest))
|
||||
}
|
||||
|
||||
fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
if let Some(raw_data) = self.cache.get(digest) {
|
||||
return Ok(raw_data.to_vec());
|
||||
}
|
||||
|
||||
let chunk = self.read_raw_chunk(digest)?;
|
||||
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
@ -70,5 +71,37 @@ impl ReadChunk for RemoteChunkReader {
|
||||
|
||||
Ok(raw_data)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
impl AsyncReadChunk for RemoteChunkReader {
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
|
||||
Box::pin(Self::read_raw_chunk(self, digest))
|
||||
}
|
||||
|
||||
fn read_chunk<'a>(
|
||||
&'a mut self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
|
||||
Box::pin(async move {
|
||||
if let Some(raw_data) = self.cache.get(digest) {
|
||||
return Ok(raw_data.to_vec());
|
||||
}
|
||||
|
||||
let chunk = Self::read_raw_chunk(self, digest).await?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
let use_cache = self.cache_hint.contains_key(digest);
|
||||
if use_cache {
|
||||
self.cache.insert(*digest, raw_data.to_vec());
|
||||
}
|
||||
|
||||
Ok(raw_data)
|
||||
})
|
||||
}
|
||||
}
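The AsyncReadChunk impl above cannot declare async fn in a trait directly, so each trait method returns a pinned boxed future and delegates to an inherent async fn; a minimal sketch of that pattern (the trait and all names are illustrative):

use std::future::Future;
use std::pin::Pin;

trait AsyncFetch {
    fn fetch<'a>(&'a mut self, key: &'a str)
        -> Pin<Box<dyn Future<Output = String> + Send + 'a>>;
}

struct Demo;

impl Demo {
    // the real logic lives in a plain async fn
    async fn fetch_impl(&mut self, key: &str) -> String {
        format!("value for {}", key)
    }
}

impl AsyncFetch for Demo {
    fn fetch<'a>(&'a mut self, key: &'a str)
        -> Pin<Box<dyn Future<Output = String> + Send + 'a>> {
        // Box::pin turns the anonymous future into the boxed trait object
        Box::pin(self.fetch_impl(key))
    }
}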
|
||||
|
@ -190,7 +190,7 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
|
||||
"system" => {
|
||||
if components_len == 1 { return Ok(()); }
|
||||
match components[1] {
|
||||
"log" | "status" | "tasks" | "time" => {
|
||||
"disks" | "log" | "status" | "tasks" | "time" => {
|
||||
if components_len == 2 { return Ok(()); }
|
||||
}
|
||||
"services" => { // /system/services/{service}
|
||||
|
@ -141,7 +141,7 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {
|
||||
|
||||
pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error> {
|
||||
|
||||
let output = Command::new("/usr/bin/diff")
|
||||
let output = Command::new("diff")
|
||||
.arg("-b")
|
||||
.arg("-u")
|
||||
.arg(filename)
|
||||
@ -165,10 +165,10 @@ pub fn assert_ifupdown2_installed() -> Result<(), Error> {
|
||||
|
||||
pub fn network_reload() -> Result<(), Error> {
|
||||
|
||||
let output = Command::new("/sbin/ifreload")
|
||||
let output = Command::new("ifreload")
|
||||
.arg("-a")
|
||||
.output()
|
||||
.map_err(|err| format_err!("failed to execute '/sbin/ifreload' - {}", err))?;
|
||||
.map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;
|
||||
|
||||
crate::tools::command_output(output, None)
|
||||
.map_err(|err| format_err!("ifreload failed: {}", err))?;
|
||||
|
@ -1,229 +0,0 @@
|
||||
//! Helpers to generate a binary search tree stored in an array from a
|
||||
//! sorted array.
|
||||
//!
|
||||
//! Specifically, for any given sorted array 'input' permute the
|
||||
//! array so that the following rule holds:
|
||||
//!
|
||||
//! For each array item with index i, the item at 2i+1 is smaller and
|
||||
//! the item at 2i+2 is larger.
|
||||
//!
|
||||
//! This structure permits efficient (meaning: O(log(n))) binary
|
||||
//! searches: start with item i=0 (i.e. the root of the BST), compare
|
||||
//! the value with the searched item, if smaller proceed at item
|
||||
//! 2i+1, if larger proceed at item 2i+2, and repeat, until either
|
||||
//! the item is found, or the indexes grow beyond the array size,
|
||||
//! which means the entry does not exist.
|
||||
//!
|
||||
//! Effectively this implements bisection, but instead of jumping
|
||||
//! around wildly in the array during a single search we only search
|
||||
//! with strictly monotonically increasing indexes.
|
||||
//!
|
||||
//! Algorithm is from casync (camakebst.c), simplified and optimized
|
||||
//! for Rust. Permutation function originally by L. Bressel, 2017. We
|
||||
//! pass permutation info to user provided callback, which actually
|
||||
//! implements the data copy.
|
||||
//!
|
||||
//! The Wikipedia article on [Binary
|
||||
//! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
|
||||
//! introduction on how to store binary trees in an array.
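A minimal sketch of the lookup rule described above, assuming a slice that was already permuted into this layout (the helper name is illustrative):

fn find_in_tree_array(tree: &[u64], key: u64) -> Option<usize> {
    let mut i = 0;
    while i < tree.len() {
        match key.cmp(&tree[i]) {
            std::cmp::Ordering::Equal => return Some(i),
            std::cmp::Ordering::Less => i = 2 * i + 1,    // left child
            std::cmp::Ordering::Greater => i = 2 * i + 2, // right child
        }
    }
    None
}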
|
||||
|
||||
use std::cmp::Ordering;
|
||||
|
||||
#[allow(clippy::many_single_char_names)]
|
||||
fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
|
||||
copy_func: &mut F,
|
||||
// we work on input array input[o..o+n]
|
||||
n: usize,
|
||||
o: usize,
|
||||
e: usize,
|
||||
i: usize,
|
||||
) {
|
||||
let p = 1 << e;
|
||||
|
||||
let t = p + (p>>1) - 1;
|
||||
|
||||
let m = if n > t {
|
||||
// |...........p.............t....n........(2p)|
|
||||
p - 1
|
||||
} else {
|
||||
// |...........p.....n.......t.............(2p)|
|
||||
p - 1 - (t-n)
|
||||
};
|
||||
|
||||
(copy_func)(o+m, i);
|
||||
|
||||
if m > 0 {
|
||||
copy_binary_search_tree_inner(copy_func, m, o, e-1, i*2+1);
|
||||
}
|
||||
|
||||
if (m + 1) < n {
|
||||
copy_binary_search_tree_inner(copy_func, n-m-1, o+m+1, e-1, i*2+2);
|
||||
}
|
||||
}
|
||||
|
||||
/// This function calls the provided `copy_func()` with the permutation
|
||||
/// info.
|
||||
///
|
||||
/// ```
|
||||
/// # use proxmox_backup::pxar::copy_binary_search_tree;
|
||||
/// copy_binary_search_tree(5, |src, dest| {
|
||||
/// println!("Copy {} to {}", src, dest);
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// This will produce the following output:
|
||||
///
|
||||
/// ```no-compile
|
||||
/// Copy 3 to 0
|
||||
/// Copy 1 to 1
|
||||
/// Copy 0 to 3
|
||||
/// Copy 2 to 4
|
||||
/// Copy 4 to 2
|
||||
/// ```
|
||||
///
|
||||
/// So this generates the following permutation: `[3,1,4,0,2]`.
|
||||
|
||||
pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
|
||||
n: usize,
|
||||
mut copy_func: F,
|
||||
) {
|
||||
if n == 0 { return };
|
||||
let e = (64 - n.leading_zeros() - 1) as usize; // fast log2(n)
|
||||
|
||||
copy_binary_search_tree_inner(&mut copy_func, n, 0, e, 0);
|
||||
}
|
||||
|
||||
|
||||
/// This function searches for the index where the comparison by the provided
|
||||
/// `compare()` function returns `Ordering::Equal`.
|
||||
/// The order of the comparison matters (it is not commutative): compare the search
|
||||
/// value to the value at the given index, as shown in the examples.
|
||||
/// The parameter `skip_multiples` defines the number of matches to ignore while
|
||||
/// searching before returning the index, in order to look up duplicate entries in
|
||||
/// the tree.
|
||||
///
|
||||
/// ```
|
||||
/// # use proxmox_backup::pxar::{copy_binary_search_tree, search_binary_tree_by};
|
||||
/// let mut vals = vec![0,1,2,2,2,3,4,5,6,6,7,8,8,8];
|
||||
///
|
||||
/// let clone = vals.clone();
|
||||
/// copy_binary_search_tree(vals.len(), |s, d| {
|
||||
/// vals[d] = clone[s];
|
||||
/// });
|
||||
/// let should_be = vec![5,2,8,1,3,6,8,0,2,2,4,6,7,8];
|
||||
/// assert_eq!(vals, should_be);
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 0;
|
||||
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(2));
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(2, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(6));
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(6, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(13));
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 0;
|
||||
/// // if start index is equal to the array length, `None` is returned.
|
||||
/// let idx = search_binary_tree_by(vals.len(), vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 0;
|
||||
/// // if start index is larger than length, `None` is returned.
|
||||
/// let idx = search_binary_tree_by(vals.len() + 1, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
/// ```
|
||||
|
||||
pub fn search_binary_tree_by<F: Copy + Fn(usize) -> Ordering>(
|
||||
start: usize,
|
||||
size: usize,
|
||||
skip_multiples: usize,
|
||||
compare: F
|
||||
) -> Option<usize> {
|
||||
if start >= size {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut skip = skip_multiples;
|
||||
let cmp = compare(start);
|
||||
if cmp == Ordering::Equal {
|
||||
if skip == 0 {
|
||||
// Found matching hash and want this one
|
||||
return Some(start);
|
||||
}
|
||||
// Found matching hash, but we should skip the first `skip_multiple`,
|
||||
// so continue search with reduced skip count.
|
||||
skip -= 1;
|
||||
}
|
||||
|
||||
if cmp == Ordering::Less || cmp == Ordering::Equal {
|
||||
let res = search_binary_tree_by(2 * start + 1, size, skip, compare);
|
||||
if res.is_some() {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
if cmp == Ordering::Greater || cmp == Ordering::Equal {
|
||||
let res = search_binary_tree_by(2 * start + 2, size, skip, compare);
|
||||
if res.is_some() {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_search_tree() {
|
||||
|
||||
fn run_test(len: usize) -> Vec<usize> {
|
||||
|
||||
const MARKER: usize = 0xfffffff;
|
||||
let mut output = vec![];
|
||||
for _i in 0..len { output.push(MARKER); }
|
||||
copy_binary_search_tree(len, |s, d| {
|
||||
assert!(output[d] == MARKER);
|
||||
output[d] = s;
|
||||
});
|
||||
if len < 32 { println!("GOT:{}:{:?}", len, output); }
|
||||
for i in 0..len {
|
||||
assert!(output[i] != MARKER);
|
||||
}
|
||||
output
|
||||
}
|
||||
|
||||
assert!(run_test(0).len() == 0);
|
||||
assert!(run_test(1) == [0]);
|
||||
assert!(run_test(2) == [1,0]);
|
||||
assert!(run_test(3) == [1,0,2]);
|
||||
assert!(run_test(4) == [2,1,3,0]);
|
||||
assert!(run_test(5) == [3,1,4,0,2]);
|
||||
assert!(run_test(6) == [3,1,5,0,2,4]);
|
||||
assert!(run_test(7) == [3,1,5,0,2,4,6]);
|
||||
assert!(run_test(8) == [4,2,6,1,3,5,7,0]);
|
||||
assert!(run_test(9) == [5,3,7,1,4,6,8,0,2]);
|
||||
assert!(run_test(10) == [6,3,8,1,5,7,9,0,2,4]);
|
||||
assert!(run_test(11) == [7,3,9,1,5,8,10,0,2,4,6]);
|
||||
assert!(run_test(12) == [7,3,10,1,5,9,11,0,2,4,6,8]);
|
||||
assert!(run_test(13) == [7,3,11,1,5,9,12,0,2,4,6,8,10]);
|
||||
assert!(run_test(14) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12]);
|
||||
assert!(run_test(15) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12,14]);
|
||||
assert!(run_test(16) == [8,4,12,2,6,10,14,1,3,5,7,9,11,13,15,0]);
|
||||
assert!(run_test(17) == [9,5,13,3,7,11,15,1,4,6,8,10,12,14,16,0,2]);
|
||||
|
||||
for len in 18..1000 {
|
||||
run_test(len);
|
||||
}
|
||||
}
|
998
src/pxar/create.rs
Normal file
@ -0,0 +1,998 @@
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::fmt;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::dir::Dir;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::{FileStat, Mode};
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchList, MatchType, PatternFlag};
|
||||
use pxar::Metadata;
|
||||
use pxar::encoder::LinkOffset;
|
||||
|
||||
use proxmox::c_str;
|
||||
use proxmox::sys::error::SysError;
|
||||
use proxmox::tools::fd::RawFdNum;
|
||||
use proxmox::tools::vec;
|
||||
|
||||
use crate::pxar::catalog::BackupCatalogWriter;
|
||||
use crate::pxar::Flags;
|
||||
use crate::pxar::tools::assert_single_path_component;
|
||||
use crate::tools::{acl, fs, xattr, Fd};
|
||||
|
||||
fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
|
||||
let mut fs_stat = std::mem::MaybeUninit::uninit();
|
||||
let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
|
||||
Errno::result(res)?;
|
||||
let fs_stat = unsafe { fs_stat.assume_init() };
|
||||
|
||||
Ok(fs_stat.f_type)
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub fn is_virtual_file_system(magic: i64) -> bool {
|
||||
use proxmox::sys::linux::magic::*;
|
||||
|
||||
match magic {
|
||||
BINFMTFS_MAGIC |
|
||||
CGROUP2_SUPER_MAGIC |
|
||||
CGROUP_SUPER_MAGIC |
|
||||
CONFIGFS_MAGIC |
|
||||
DEBUGFS_MAGIC |
|
||||
DEVPTS_SUPER_MAGIC |
|
||||
EFIVARFS_MAGIC |
|
||||
FUSE_CTL_SUPER_MAGIC |
|
||||
HUGETLBFS_MAGIC |
|
||||
MQUEUE_MAGIC |
|
||||
NFSD_MAGIC |
|
||||
PROC_SUPER_MAGIC |
|
||||
PSTOREFS_MAGIC |
|
||||
RPCAUTH_GSSMAGIC |
|
||||
SECURITYFS_MAGIC |
|
||||
SELINUX_MAGIC |
|
||||
SMACK_MAGIC |
|
||||
SYSFS_MAGIC => true,
|
||||
_ => false
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ArchiveError {
|
||||
path: PathBuf,
|
||||
error: Error,
|
||||
}
|
||||
|
||||
impl ArchiveError {
|
||||
fn new(path: PathBuf, error: Error) -> Self {
|
||||
Self { path, error }
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for ArchiveError {}
|
||||
|
||||
impl fmt::Display for ArchiveError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "error at {:?}: {}", self.path, self.error)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Hash)]
|
||||
struct HardLinkInfo {
|
||||
st_dev: u64,
|
||||
st_ino: u64,
|
||||
}
|
||||
|
||||
/// In case we want to collect them or redirect them we can just add this here:
|
||||
struct ErrorReporter;
|
||||
|
||||
impl std::io::Write for ErrorReporter {
|
||||
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
|
||||
std::io::stderr().write(data)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
std::io::stderr().flush()
|
||||
}
|
||||
}
|
||||
|
||||
struct Archiver<'a, 'b> {
|
||||
feature_flags: Flags,
|
||||
fs_feature_flags: Flags,
|
||||
fs_magic: i64,
|
||||
patterns: Vec<MatchEntry>,
|
||||
callback: &'a mut dyn FnMut(&Path) -> Result<(), Error>,
|
||||
catalog: Option<&'b mut dyn BackupCatalogWriter>,
|
||||
path: PathBuf,
|
||||
entry_counter: usize,
|
||||
entry_limit: usize,
|
||||
current_st_dev: libc::dev_t,
|
||||
device_set: Option<HashSet<u64>>,
|
||||
hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
|
||||
errors: ErrorReporter,
|
||||
file_copy_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
type Encoder<'a, 'b> = pxar::encoder::Encoder<'a, &'b mut dyn pxar::encoder::SeqWrite>;
|
||||
|
||||
pub fn create_archive<T, F>(
|
||||
source_dir: Dir,
|
||||
mut writer: T,
|
||||
mut patterns: Vec<MatchEntry>,
|
||||
feature_flags: Flags,
|
||||
mut device_set: Option<HashSet<u64>>,
|
||||
skip_lost_and_found: bool,
|
||||
mut callback: F,
|
||||
entry_limit: usize,
|
||||
catalog: Option<&mut dyn BackupCatalogWriter>,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: pxar::encoder::SeqWrite,
|
||||
F: FnMut(&Path) -> Result<(), Error>,
|
||||
{
|
||||
let fs_magic = detect_fs_type(source_dir.as_raw_fd())?;
|
||||
if is_virtual_file_system(fs_magic) {
|
||||
bail!("refusing to backup a virtual file system");
|
||||
}
|
||||
|
||||
let fs_feature_flags = Flags::from_magic(fs_magic);
|
||||
|
||||
let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?;
|
||||
let metadata = get_metadata(
|
||||
source_dir.as_raw_fd(),
|
||||
&stat,
|
||||
feature_flags & fs_feature_flags,
|
||||
fs_magic,
|
||||
)
|
||||
.map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;
|
||||
|
||||
if let Some(ref mut set) = device_set {
|
||||
set.insert(stat.st_dev);
|
||||
}
|
||||
|
||||
let writer = &mut writer as &mut dyn pxar::encoder::SeqWrite;
|
||||
let mut encoder = Encoder::new(writer, &metadata)?;
|
||||
|
||||
if skip_lost_and_found {
|
||||
patterns.push(MatchEntry::parse_pattern(
|
||||
"**/lost+found",
|
||||
PatternFlag::PATH_NAME,
|
||||
MatchType::Exclude,
|
||||
)?);
|
||||
}
|
||||
|
||||
let mut archiver = Archiver {
|
||||
feature_flags,
|
||||
fs_feature_flags,
|
||||
fs_magic,
|
||||
callback: &mut callback,
|
||||
patterns,
|
||||
catalog,
|
||||
path: PathBuf::new(),
|
||||
entry_counter: 0,
|
||||
entry_limit,
|
||||
current_st_dev: stat.st_dev,
|
||||
device_set,
|
||||
hardlinks: HashMap::new(),
|
||||
errors: ErrorReporter,
|
||||
file_copy_buffer: vec::undefined(4 * 1024 * 1024),
|
||||
};
|
||||
|
||||
archiver.archive_dir_contents(&mut encoder, source_dir, true)?;
|
||||
encoder.finish()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct FileListEntry {
|
||||
name: CString,
|
||||
path: PathBuf,
|
||||
stat: FileStat,
|
||||
}
|
||||
|
||||
impl<'a, 'b> Archiver<'a, 'b> {
|
||||
/// Get the currently effective feature flags. (Requested flags masked by the file system
|
||||
/// feature flags).
|
||||
fn flags(&self) -> Flags {
|
||||
self.feature_flags & self.fs_feature_flags
|
||||
}
|
||||
|
||||
fn wrap_err(&self, err: Error) -> Error {
|
||||
if err.downcast_ref::<ArchiveError>().is_some() {
|
||||
err
|
||||
} else {
|
||||
ArchiveError::new(self.path.clone(), err).into()
|
||||
}
|
||||
}
|
||||
|
||||
fn archive_dir_contents(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
mut dir: Dir,
|
||||
is_root: bool,
|
||||
) -> Result<(), Error> {
|
||||
let entry_counter = self.entry_counter;
|
||||
|
||||
let old_patterns_count = self.patterns.len();
|
||||
self.read_pxar_excludes(dir.as_raw_fd())?;
|
||||
|
||||
let file_list = self.generate_directory_file_list(&mut dir, is_root)?;
|
||||
|
||||
let dir_fd = dir.as_raw_fd();
|
||||
|
||||
let old_path = std::mem::take(&mut self.path);
|
||||
|
||||
for file_entry in file_list {
|
||||
let file_name = file_entry.name.to_bytes();
|
||||
|
||||
if is_root && file_name == b".pxarexclude-cli" {
|
||||
self.encode_pxarexclude_cli(encoder, &file_entry.name)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
(self.callback)(Path::new(OsStr::from_bytes(file_name)))?;
|
||||
self.path = file_entry.path;
|
||||
self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat)
|
||||
.map_err(|err| self.wrap_err(err))?;
|
||||
}
|
||||
self.path = old_path;
|
||||
self.entry_counter = entry_counter;
|
||||
self.patterns.truncate(old_patterns_count);
|
||||
|
||||
Ok(())
|
||||
}
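archive_dir_contents above saves the current path, entry counter and pattern count, recurses, and then restores them; a minimal sketch of that save-and-restore-around-recursion idiom (the struct and names are illustrative):

use std::path::PathBuf;

struct Walker {
    path: PathBuf,
    patterns: Vec<String>,
}

impl Walker {
    fn descend(&mut self, name: &str) {
        let old_path = std::mem::take(&mut self.path); // move out, leave an empty PathBuf behind
        let old_pattern_count = self.patterns.len();

        self.path = old_path.join(name);
        // ... visit the child directory here, possibly pushing more patterns ...

        self.path = old_path;                      // restore the parent state
        self.patterns.truncate(old_pattern_count); // drop patterns added for the child
    }
}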
|
||||
|
||||
/// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`.
|
||||
fn open_file(
|
||||
&mut self,
|
||||
parent: RawFd,
|
||||
file_name: &CStr,
|
||||
oflags: OFlag,
|
||||
) -> Result<Option<Fd>, Error> {
|
||||
match Fd::openat(
|
||||
&unsafe { RawFdNum::from_raw_fd(parent) },
|
||||
file_name,
|
||||
oflags,
|
||||
Mode::empty(),
|
||||
) {
|
||||
Ok(fd) => Ok(Some(fd)),
|
||||
Err(nix::Error::Sys(Errno::ENOENT)) => Ok(None),
|
||||
Err(nix::Error::Sys(Errno::EACCES)) => {
|
||||
write!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
|
||||
Ok(None)
|
||||
}
|
||||
Err(other) => Err(Error::from(other)),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
|
||||
let fd = self.open_file(
|
||||
parent,
|
||||
c_str!(".pxarexclude"),
|
||||
OFlag::O_RDONLY | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
|
||||
)?;
|
||||
|
||||
let old_pattern_count = self.patterns.len();
|
||||
|
||||
if let Some(fd) = fd {
|
||||
let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
|
||||
|
||||
use io::BufRead;
|
||||
for line in io::BufReader::new(file).lines() {
|
||||
let line = match line {
|
||||
Ok(line) => line,
|
||||
Err(err) => {
|
||||
let _ = write!(
|
||||
self.errors,
|
||||
"ignoring .pxarexclude after read error in {:?}: {}",
|
||||
self.path,
|
||||
err,
|
||||
);
|
||||
self.patterns.truncate(old_pattern_count);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let line = line.trim();
|
||||
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
continue;
|
||||
}
|
||||
|
||||
match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Exclude) {
|
||||
Ok(pattern) => self.patterns.push(pattern),
|
||||
Err(err) => {
|
||||
let _ = write!(self.errors, "bad pattern in {:?}: {}", self.path, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn encode_pxarexclude_cli(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
file_name: &CStr,
|
||||
) -> Result<(), Error> {
|
||||
let content = generate_pxar_excludes_cli(&self.patterns);
|
||||
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_file(file_name, content.len() as u64, 0)?;
|
||||
}
|
||||
|
||||
let mut metadata = Metadata::default();
|
||||
metadata.stat.mode = pxar::format::mode::IFREG | 0o600;
|
||||
|
||||
let mut file = encoder.create_file(&metadata, ".pxarexclude-cli", content.len() as u64)?;
|
||||
file.write_all(&content)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn generate_directory_file_list(
|
||||
&mut self,
|
||||
dir: &mut Dir,
|
||||
is_root: bool,
|
||||
) -> Result<Vec<FileListEntry>, Error> {
|
||||
let dir_fd = dir.as_raw_fd();
|
||||
|
||||
let mut file_list = Vec::new();
|
||||
|
||||
if is_root && !self.patterns.is_empty() {
|
||||
file_list.push(FileListEntry {
|
||||
name: CString::new(".pxarexclude-cli").unwrap(),
|
||||
path: PathBuf::new(),
|
||||
stat: unsafe { std::mem::zeroed() },
|
||||
});
|
||||
}
|
||||
|
||||
for file in dir.iter() {
|
||||
let file = file?;
|
||||
|
||||
let file_name = file.file_name().to_owned();
|
||||
let file_name_bytes = file_name.to_bytes();
|
||||
if file_name_bytes == b"." || file_name_bytes == b".." {
|
||||
continue;
|
||||
}
|
||||
|
||||
if is_root && file_name_bytes == b".pxarexclude-cli" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if file_name_bytes == b".pxarexclude" {
|
||||
continue;
|
||||
}
|
||||
|
||||
let os_file_name = OsStr::from_bytes(file_name_bytes);
|
||||
assert_single_path_component(os_file_name)?;
|
||||
let full_path = self.path.join(os_file_name);
|
||||
|
||||
let stat = match nix::sys::stat::fstatat(
|
||||
dir_fd,
|
||||
file_name.as_c_str(),
|
||||
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
|
||||
) {
|
||||
Ok(stat) => stat,
|
||||
Err(ref err) if err.not_found() => continue,
|
||||
Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
|
||||
};
|
||||
|
||||
if self
|
||||
.patterns
|
||||
.matches(full_path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
|
||||
== Some(MatchType::Exclude)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
self.entry_counter += 1;
|
||||
if self.entry_counter > self.entry_limit {
|
||||
bail!("exceeded allowed number of file entries (> {})",self.entry_limit);
|
||||
}
|
||||
|
||||
file_list.push(FileListEntry {
|
||||
name: file_name,
|
||||
path: full_path,
|
||||
stat
|
||||
});
|
||||
}
|
||||
|
||||
file_list.sort_unstable_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
Ok(file_list)
|
||||
}
|
||||
|
||||
fn report_vanished_file(&mut self) -> Result<(), Error> {
|
||||
write!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
|
||||
write!(
|
||||
self.errors,
|
||||
"warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
|
||||
self.path,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
|
||||
write!(
|
||||
self.errors,
|
||||
"warning: file size increased while reading: {:?}, file will be truncated!",
|
||||
self.path,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_entry(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
parent: RawFd,
|
||||
c_file_name: &CStr,
|
||||
stat: &FileStat,
|
||||
) -> Result<(), Error> {
|
||||
use pxar::format::mode;
|
||||
|
||||
let file_mode = stat.st_mode & libc::S_IFMT;
|
||||
let open_mode = if !(file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR) {
|
||||
OFlag::O_PATH
|
||||
} else {
|
||||
OFlag::empty()
|
||||
};
|
||||
|
||||
let fd = self.open_file(
|
||||
parent,
|
||||
c_file_name,
|
||||
open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW | OFlag::O_CLOEXEC | OFlag::O_NOCTTY,
|
||||
)?;
|
||||
|
||||
let fd = match fd {
|
||||
Some(fd) => fd,
|
||||
None => {
|
||||
self.report_vanished_file()?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic)?;
|
||||
|
||||
if self
|
||||
.patterns
|
||||
.matches(self.path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
|
||||
== Some(MatchType::Exclude)
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
|
||||
match metadata.file_type() {
|
||||
mode::IFREG => {
|
||||
let link_info = HardLinkInfo {
|
||||
st_dev: stat.st_dev,
|
||||
st_ino: stat.st_ino,
|
||||
};
|
||||
|
||||
if stat.st_nlink > 1 {
|
||||
if let Some((path, offset)) = self.hardlinks.get(&link_info) {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_hardlink(c_file_name)?;
|
||||
}
|
||||
|
||||
encoder.add_hardlink(file_name, path, *offset)?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
let file_size = stat.st_size as u64;
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_file(c_file_name, file_size, stat.st_mtime as u64)?;
|
||||
}
|
||||
|
||||
let offset: LinkOffset =
|
||||
self.add_regular_file(encoder, fd, file_name, &metadata, file_size)?;
|
||||
|
||||
if stat.st_nlink > 1 {
|
||||
self.hardlinks.insert(link_info, (self.path.clone(), offset));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
mode::IFDIR => {
|
||||
let dir = Dir::from_fd(fd.into_raw_fd())?;
|
||||
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.start_directory(c_file_name)?;
|
||||
}
|
||||
let result = self.add_directory(encoder, dir, c_file_name, &metadata, stat);
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.end_directory()?;
|
||||
}
|
||||
result
|
||||
}
|
||||
mode::IFSOCK => {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_socket(c_file_name)?;
|
||||
}
|
||||
|
||||
Ok(encoder.add_socket(&metadata, file_name)?)
|
||||
}
|
||||
mode::IFIFO => {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_fifo(c_file_name)?;
|
||||
}
|
||||
|
||||
Ok(encoder.add_fifo(&metadata, file_name)?)
|
||||
}
|
||||
mode::IFLNK => {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_symlink(c_file_name)?;
|
||||
}
|
||||
|
||||
self.add_symlink(encoder, fd, file_name, &metadata)
|
||||
}
|
||||
mode::IFBLK => {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_block_device(c_file_name)?;
|
||||
}
|
||||
|
||||
self.add_device(encoder, file_name, &metadata, &stat)
|
||||
}
|
||||
mode::IFCHR => {
|
||||
if let Some(ref mut catalog) = self.catalog {
|
||||
catalog.add_char_device(c_file_name)?;
|
||||
}
|
||||
|
||||
self.add_device(encoder, file_name, &metadata, &stat)
|
||||
}
|
||||
other => bail!(
|
||||
"encountered unknown file type: 0x{:x} (0o{:o})",
|
||||
other,
|
||||
other
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_directory(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
dir: Dir,
|
||||
dir_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
stat: &FileStat,
|
||||
) -> Result<(), Error> {
|
||||
let dir_name = OsStr::from_bytes(dir_name.to_bytes());
|
||||
|
||||
let mut encoder = encoder.create_directory(dir_name, &metadata)?;
|
||||
|
||||
let old_fs_magic = self.fs_magic;
|
||||
let old_fs_feature_flags = self.fs_feature_flags;
|
||||
let old_st_dev = self.current_st_dev;
|
||||
|
||||
let mut skip_contents = false;
|
||||
if old_st_dev != stat.st_dev {
|
||||
self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
|
||||
self.fs_feature_flags = Flags::from_magic(self.fs_magic);
|
||||
self.current_st_dev = stat.st_dev;
|
||||
|
||||
if is_virtual_file_system(self.fs_magic) {
|
||||
skip_contents = true;
|
||||
} else if let Some(set) = &self.device_set {
|
||||
skip_contents = !set.contains(&stat.st_dev);
|
||||
}
|
||||
}
|
||||
|
||||
let result = if skip_contents {
|
||||
Ok(())
|
||||
} else {
|
||||
self.archive_dir_contents(&mut encoder, dir, false)
|
||||
};
|
||||
|
||||
self.fs_magic = old_fs_magic;
|
||||
self.fs_feature_flags = old_fs_feature_flags;
|
||||
self.current_st_dev = old_st_dev;
|
||||
|
||||
encoder.finish()?;
|
||||
result
|
||||
}
|
||||
|
||||
fn add_regular_file(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
fd: Fd,
|
||||
file_name: &Path,
|
||||
metadata: &Metadata,
|
||||
file_size: u64,
|
||||
) -> Result<LinkOffset, Error> {
|
||||
let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
|
||||
let mut remaining = file_size;
|
||||
let mut out = encoder.create_file(metadata, file_name, file_size)?;
|
||||
while remaining != 0 {
|
||||
let mut got = file.read(&mut self.file_copy_buffer[..])?;
if got == 0 { break; } // EOF: the file shrunk, the padding code below fills the rest with zeros
|
||||
if got as u64 > remaining {
|
||||
self.report_file_grew_while_reading()?;
|
||||
got = remaining as usize;
|
||||
}
|
||||
out.write_all(&self.file_copy_buffer[..got])?;
|
||||
remaining -= got as u64;
|
||||
}
|
||||
if remaining > 0 {
|
||||
self.report_file_shrunk_while_reading()?;
|
||||
let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
|
||||
vec::clear(&mut self.file_copy_buffer[..to_zero]);
|
||||
while remaining != 0 {
|
||||
let fill = remaining.min(self.file_copy_buffer.len() as u64) as usize;
|
||||
out.write_all(&self.file_copy_buffer[..fill])?;
|
||||
remaining -= fill as u64;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(out.file_offset())
|
||||
}
|
||||
|
||||
fn add_symlink(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
fd: Fd,
|
||||
file_name: &Path,
|
||||
metadata: &Metadata,
|
||||
) -> Result<(), Error> {
|
||||
let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
|
||||
encoder.add_symlink(metadata, file_name, dest)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_device(
|
||||
&mut self,
|
||||
encoder: &mut Encoder,
|
||||
file_name: &Path,
|
||||
metadata: &Metadata,
|
||||
stat: &FileStat,
|
||||
) -> Result<(), Error> {
|
||||
Ok(encoder.add_device(
|
||||
metadata,
|
||||
file_name,
|
||||
pxar::format::Device::from_dev_t(stat.st_rdev),
|
||||
)?)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64) -> Result<Metadata, Error> {
|
||||
// required for some of these
|
||||
let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
|
||||
|
||||
let mtime = u64::try_from(stat.st_mtime * 1_000_000_000 + stat.st_mtime_nsec)
|
||||
.map_err(|_| format_err!("file with negative mtime"))?;
|
||||
|
||||
let mut meta = Metadata {
|
||||
stat: pxar::Stat {
|
||||
mode: u64::from(stat.st_mode),
|
||||
flags: 0,
|
||||
uid: stat.st_uid,
|
||||
gid: stat.st_gid,
|
||||
mtime,
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags)?;
|
||||
get_chattr(&mut meta, fd)?;
|
||||
get_fat_attr(&mut meta, fd, fs_magic)?;
|
||||
get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
|
||||
Ok(meta)
|
||||
}
|
||||
|
||||
fn errno_is_unsupported(errno: Errno) -> bool {
|
||||
match errno {
|
||||
Errno::ENOTTY | Errno::ENOSYS | Errno::EBADF | Errno::EOPNOTSUPP | Errno::EINVAL => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags) -> Result<(), Error> {
|
||||
if !flags.contains(Flags::WITH_FCAPS) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) {
|
||||
Ok(data) => {
|
||||
meta.fcaps = Some(pxar::format::FCaps { data });
|
||||
Ok(())
|
||||
}
|
||||
Err(Errno::ENODATA) => Ok(()),
|
||||
Err(Errno::EOPNOTSUPP) => Ok(()),
|
||||
Err(Errno::EBADF) => Ok(()), // symlinks
|
||||
Err(err) => bail!("failed to read file capabilities: {}", err),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_xattr_fcaps_acl(
|
||||
meta: &mut Metadata,
|
||||
fd: RawFd,
|
||||
proc_path: &Path,
|
||||
flags: Flags,
|
||||
) -> Result<(), Error> {
|
||||
if !flags.contains(Flags::WITH_XATTRS) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let xattrs = match xattr::flistxattr(fd) {
|
||||
Ok(names) => names,
|
||||
Err(Errno::EOPNOTSUPP) => return Ok(()),
|
||||
Err(Errno::EBADF) => return Ok(()), // symlinks
|
||||
Err(err) => bail!("failed to read xattrs: {}", err),
|
||||
};
|
||||
|
||||
for attr in &xattrs {
|
||||
if xattr::is_security_capability(&attr) {
|
||||
get_fcaps(meta, fd, flags)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
if xattr::is_acl(&attr) {
|
||||
get_acl(meta, proc_path, flags)?;
|
||||
continue;
|
||||
}
|
||||
|
||||
if !xattr::is_valid_xattr_name(&attr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
match xattr::fgetxattr(fd, attr) {
|
||||
Ok(data) => meta
|
||||
.xattrs
|
||||
.push(pxar::format::XAttr::new(attr.to_bytes(), data)),
|
||||
Err(Errno::ENODATA) => (), // it got removed while we were iterating...
|
||||
Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
|
||||
Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
|
||||
Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
|
||||
let mut attr: usize = 0;
|
||||
|
||||
match unsafe { fs::read_attr_fd(fd, &mut attr) } {
|
||||
Ok(_) => (),
|
||||
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => bail!("failed to read file attributes: {}", err),
|
||||
}
|
||||
|
||||
metadata.stat.flags |= Flags::from_chattr(attr as u32).bits();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
|
||||
use proxmox::sys::linux::magic::*;
|
||||
|
||||
if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut attr: u32 = 0;
|
||||
|
||||
match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
|
||||
Ok(_) => (),
|
||||
Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => bail!("failed to read fat attributes: {}", err),
|
||||
}
|
||||
|
||||
metadata.stat.flags |= Flags::from_fat_attr(attr).bits();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
|
||||
fn get_quota_project_id(
|
||||
metadata: &mut Metadata,
|
||||
fd: RawFd,
|
||||
flags: Flags,
|
||||
magic: i64,
|
||||
) -> Result<(), Error> {
|
||||
if !(metadata.is_dir() || metadata.is_regular_file()) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !flags.contains(Flags::WITH_QUOTA_PROJID) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
use proxmox::sys::linux::magic::*;
|
||||
|
||||
match magic {
|
||||
EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
|
||||
_ => return Ok(()),
|
||||
}
|
||||
|
||||
let mut fsxattr = fs::FSXAttr::default();
|
||||
let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
|
||||
|
||||
// On some FUSE filesystems it can happen that ioctl is not supported.
|
||||
// For these cases projid is set to 0 while the error is ignored.
|
||||
if let Err(err) = res {
|
||||
let errno = err
|
||||
.as_errno()
|
||||
.ok_or_else(|| format_err!("error while reading quota project id"))?;
|
||||
if errno_is_unsupported(errno) {
|
||||
return Ok(());
|
||||
} else {
|
||||
bail!("error while reading quota project id ({})", errno);
|
||||
}
|
||||
}
|
||||
|
||||
let projid = fsxattr.fsx_projid as u64;
|
||||
if projid != 0 {
|
||||
metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags) -> Result<(), Error> {
|
||||
if !flags.contains(Flags::WITH_ACL) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if metadata.is_symlink() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
get_acl_do(metadata, proc_path, acl::ACL_TYPE_ACCESS)?;
|
||||
|
||||
if metadata.is_dir() {
|
||||
get_acl_do(metadata, proc_path, acl::ACL_TYPE_DEFAULT)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_acl_do(
|
||||
metadata: &mut Metadata,
|
||||
proc_path: &Path,
|
||||
acl_type: acl::ACLType,
|
||||
) -> Result<(), Error> {
|
||||
// In order to be able to get ACLs with type ACL_TYPE_DEFAULT, we have
|
||||
// to create a path for acl_get_file(). acl_get_fd() only allows getting
|
||||
// ACL_TYPE_ACCESS attributes.
|
||||
let acl = match acl::ACL::get_file(&proc_path, acl_type) {
|
||||
Ok(acl) => acl,
|
||||
// Don't bail if underlying endpoint does not support acls
|
||||
Err(Errno::EOPNOTSUPP) => return Ok(()),
|
||||
// Don't bail if the endpoint cannot carry acls
|
||||
Err(Errno::EBADF) => return Ok(()),
|
||||
// Don't bail if there is no data
|
||||
Err(Errno::ENODATA) => return Ok(()),
|
||||
Err(err) => bail!("error while reading ACL - {}", err),
|
||||
};
|
||||
|
||||
process_acl(metadata, acl, acl_type)
|
||||
}
|
||||
|
||||
fn process_acl(
|
||||
metadata: &mut Metadata,
|
||||
acl: acl::ACL,
|
||||
acl_type: acl::ACLType,
|
||||
) -> Result<(), Error> {
|
||||
use pxar::format::acl as pxar_acl;
|
||||
use pxar::format::acl::{Group, GroupObject, Permissions, User};
|
||||
|
||||
let mut acl_user = Vec::new();
|
||||
let mut acl_group = Vec::new();
|
||||
let mut acl_group_obj = None;
|
||||
let mut acl_default = None;
|
||||
let mut user_obj_permissions = None;
|
||||
let mut group_obj_permissions = None;
|
||||
let mut other_permissions = None;
|
||||
let mut mask_permissions = None;
|
||||
|
||||
for entry in &mut acl.entries() {
|
||||
let tag = entry.get_tag_type()?;
|
||||
let permissions = entry.get_permissions()?;
|
||||
match tag {
|
||||
acl::ACL_USER_OBJ => user_obj_permissions = Some(Permissions(permissions)),
|
||||
acl::ACL_GROUP_OBJ => group_obj_permissions = Some(Permissions(permissions)),
|
||||
acl::ACL_OTHER => other_permissions = Some(Permissions(permissions)),
|
||||
acl::ACL_MASK => mask_permissions = Some(Permissions(permissions)),
|
||||
acl::ACL_USER => {
|
||||
acl_user.push(User {
|
||||
uid: entry.get_qualifier()?,
|
||||
permissions: Permissions(permissions),
|
||||
});
|
||||
}
|
||||
acl::ACL_GROUP => {
|
||||
acl_group.push(Group {
|
||||
gid: entry.get_qualifier()?,
|
||||
permissions: Permissions(permissions),
|
||||
});
|
||||
}
|
||||
_ => bail!("Unexpected ACL tag encountered!"),
|
||||
}
|
||||
}
|
||||
|
||||
acl_user.sort();
|
||||
acl_group.sort();
|
||||
|
||||
match acl_type {
|
||||
acl::ACL_TYPE_ACCESS => {
|
||||
// The mask permissions are mapped to the stat group permissions
|
||||
// in case that the ACL group permissions were set.
|
||||
// Only in that case we need to store the group permissions,
|
||||
// in the other cases they are identical to the stat group permissions.
|
||||
if let (Some(gop), true) = (group_obj_permissions, mask_permissions.is_some()) {
|
||||
acl_group_obj = Some(GroupObject { permissions: gop });
|
||||
}
|
||||
|
||||
metadata.acl.users = acl_user;
|
||||
metadata.acl.groups = acl_group;
|
||||
}
|
||||
acl::ACL_TYPE_DEFAULT => {
|
||||
if user_obj_permissions != None
|
||||
|| group_obj_permissions != None
|
||||
|| other_permissions != None
|
||||
|| mask_permissions != None
|
||||
{
|
||||
acl_default = Some(pxar_acl::Default {
|
||||
// The value is set to UINT64_MAX as placeholder if one
|
||||
// of the permissions is not set
|
||||
user_obj_permissions: user_obj_permissions.unwrap_or(Permissions::NO_MASK),
|
||||
group_obj_permissions: group_obj_permissions.unwrap_or(Permissions::NO_MASK),
|
||||
other_permissions: other_permissions.unwrap_or(Permissions::NO_MASK),
|
||||
mask_permissions: mask_permissions.unwrap_or(Permissions::NO_MASK),
|
||||
});
|
||||
}
|
||||
|
||||
metadata.acl.default_users = acl_user;
|
||||
metadata.acl.default_groups = acl_group;
|
||||
}
|
||||
_ => bail!("Unexpected ACL type encountered"),
|
||||
}
|
||||
|
||||
metadata.acl.group_obj = acl_group_obj;
|
||||
metadata.acl.default = acl_default;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Note that our pattern lists are "positive". `MatchType::Include` means the file is included.
|
||||
/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
|
||||
/// prefix.
|
||||
fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
|
||||
use pathpatterns::{MatchFlag, MatchPattern};
|
||||
|
||||
let mut content = Vec::new();
|
||||
|
||||
for pattern in patterns {
|
||||
match pattern.match_type() {
|
||||
MatchType::Include => content.push(b'!'),
|
||||
MatchType::Exclude => (),
|
||||
}
|
||||
|
||||
match pattern.pattern() {
|
||||
MatchPattern::Literal(lit) => content.extend(lit),
|
||||
MatchPattern::Pattern(pat) => content.extend(pat.pattern().to_bytes()),
|
||||
}
|
||||
|
||||
if pattern.match_flags() == MatchFlag::MATCH_DIRECTORIES && content.last() != Some(&b'/') {
|
||||
content.push(b'/');
|
||||
}
|
||||
|
||||
content.push(b'\n');
|
||||
}
|
||||
|
||||
content
|
||||
}
|
@ -1,365 +0,0 @@
|
||||
//! *pxar* format decoder for seekable files
|
||||
//!
|
||||
//! This module contains the code to decode *pxar* archive files.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{OsString, OsStr};
|
||||
use std::io::{Read, Seek, SeekFrom};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use libc;
|
||||
|
||||
use super::binary_search_tree::search_binary_tree_by;
|
||||
use super::format_definition::*;
|
||||
use super::sequential_decoder::SequentialDecoder;
|
||||
use super::match_pattern::MatchPattern;
|
||||
|
||||
use proxmox::tools::io::ReadExt;
|
||||
|
||||
pub struct DirectoryEntry {
|
||||
/// Points to the `PxarEntry` of the directory
|
||||
start: u64,
|
||||
/// Points past the goodbye table tail
|
||||
end: u64,
|
||||
/// Filename of entry
|
||||
pub filename: OsString,
|
||||
/// Entry (mode, permissions)
|
||||
pub entry: PxarEntry,
|
||||
/// Extended attributes
|
||||
pub xattr: PxarAttributes,
|
||||
/// Payload size
|
||||
pub size: u64,
|
||||
/// Target path for symbolic links
|
||||
pub target: Option<PathBuf>,
|
||||
/// Start offset of the payload if present.
|
||||
pub payload_offset: Option<u64>,
|
||||
}
|
||||
|
||||
/// Trait to create ReadSeek Decoder trait objects.
|
||||
trait ReadSeek: Read + Seek {}
|
||||
impl <R: Read + Seek> ReadSeek for R {}
|
||||
|
||||
// This one needs Read+Seek
|
||||
pub struct Decoder {
|
||||
inner: SequentialDecoder<Box<dyn ReadSeek + Send>>,
|
||||
root_start: u64,
|
||||
root_end: u64,
|
||||
}
|
||||
|
||||
const HEADER_SIZE: u64 = std::mem::size_of::<PxarHeader>() as u64;
|
||||
const GOODBYE_ITEM_SIZE: u64 = std::mem::size_of::<PxarGoodbyeItem>() as u64;
|
||||
|
||||
impl Decoder {
|
||||
pub fn new<R: Read + Seek + Send + 'static>(mut reader: R) -> Result<Self, Error> {
|
||||
let root_end = reader.seek(SeekFrom::End(0))?;
|
||||
let boxed_reader: Box<dyn ReadSeek + 'static + Send> = Box::new(reader);
|
||||
let inner = SequentialDecoder::new(boxed_reader, super::flags::DEFAULT);
|
||||
|
||||
Ok(Self { inner, root_start: 0, root_end })
|
||||
}
|
||||
|
||||
pub fn set_callback<F: Fn(&Path) -> Result<(), Error> + Send + 'static>(&mut self, callback: F ) {
|
||||
self.inner.set_callback(callback);
|
||||
}
|
||||
|
||||
pub fn root(&mut self) -> Result<DirectoryEntry, Error> {
|
||||
self.seek(SeekFrom::Start(0))?;
|
||||
let header: PxarHeader = self.inner.read_item()?;
|
||||
check_ca_header::<PxarEntry>(&header, PXAR_ENTRY)?;
|
||||
let entry: PxarEntry = self.inner.read_item()?;
|
||||
let (header, xattr) = self.inner.read_attributes()?;
|
||||
let (size, payload_offset) = match header.htype {
|
||||
PXAR_PAYLOAD => (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?)),
|
||||
_ => (0, None),
|
||||
};
|
||||
|
||||
Ok(DirectoryEntry {
|
||||
start: self.root_start,
|
||||
end: self.root_end,
|
||||
filename: OsString::new(), // Empty
|
||||
entry,
|
||||
xattr,
|
||||
size,
|
||||
target: None,
|
||||
payload_offset,
|
||||
})
|
||||
}
|
||||
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
|
||||
let pos = self.inner.get_reader_mut().seek(pos)?;
|
||||
Ok(pos)
|
||||
}
|
||||
|
||||
pub(crate) fn root_end_offset(&self) -> u64 {
|
||||
self.root_end
|
||||
}
|
||||
|
||||
/// Restore the subarchive starting at `dir` to the provided target `path`.
|
||||
///
|
||||
/// Only restore the content matched by the MatchPattern `pattern`.
|
||||
/// An empty Vec `pattern` means restore all.
|
||||
pub fn restore(&mut self, dir: &DirectoryEntry, path: &Path, pattern: &Vec<MatchPattern>) -> Result<(), Error> {
|
||||
let start = dir.start;
|
||||
self.seek(SeekFrom::Start(start))?;
|
||||
self.inner.restore(path, pattern)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn read_directory_entry(
|
||||
&mut self,
|
||||
start: u64,
|
||||
end: u64,
|
||||
) -> Result<DirectoryEntry, Error> {
|
||||
self.seek(SeekFrom::Start(start))?;
|
||||
|
||||
let head: PxarHeader = self.inner.read_item()?;
|
||||
|
||||
if head.htype != PXAR_FILENAME {
|
||||
bail!("wrong filename header type for object [{}..{}]", start, end);
|
||||
}
|
||||
|
||||
let entry_start = start + head.size;
|
||||
|
||||
let filename = self.inner.read_filename(head.size)?;
|
||||
|
||||
let head: PxarHeader = self.inner.read_item()?;
|
||||
if head.htype == PXAR_FORMAT_HARDLINK {
|
||||
let (_, offset) = self.inner.read_hardlink(head.size)?;
|
||||
// TODO: How to find the correct end offset for the hardlink target?
|
||||
// This is a bit tricky since we cannot find correct end in an efficient
|
||||
// way, on the other hand it doesn't really matter (for now) since target
|
||||
// is never a directory and end is not used in such cases.
|
||||
return self.read_directory_entry(start - offset, end);
|
||||
}
|
||||
check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
|
||||
let entry: PxarEntry = self.inner.read_item()?;
|
||||
let (header, xattr) = self.inner.read_attributes()?;
|
||||
let (size, payload_offset, target) = match header.htype {
|
||||
PXAR_PAYLOAD =>
|
||||
(header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?), None),
|
||||
PXAR_SYMLINK =>
|
||||
(header.size - HEADER_SIZE, None, Some(self.inner.read_link(header.size)?)),
|
||||
_ => (0, None, None),
|
||||
};
|
||||
|
||||
Ok(DirectoryEntry {
|
||||
start: entry_start,
|
||||
end,
|
||||
filename,
|
||||
entry,
|
||||
xattr,
|
||||
size,
|
||||
target,
|
||||
payload_offset,
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the goodbye table based on the provided end offset.
|
||||
///
|
||||
/// Get the goodbye table entries and the start and end offsets of the
|
||||
/// items they reference.
|
||||
/// If the start offset is provided, we use it to check the consistency of
|
||||
/// the data; otherwise the start offset calculated from the goodbye tail is
|
||||
/// used.
|
||||
pub(crate) fn goodbye_table(
|
||||
&mut self,
|
||||
start: Option<u64>,
|
||||
end: u64,
|
||||
) -> Result<Vec<(PxarGoodbyeItem, u64, u64)>, Error> {
|
||||
self.seek(SeekFrom::Start(end - GOODBYE_ITEM_SIZE))?;
|
||||
|
||||
let tail: PxarGoodbyeItem = self.inner.read_item()?;
|
||||
if tail.hash != PXAR_GOODBYE_TAIL_MARKER {
|
||||
bail!("missing goodbye tail marker for object at offset {}", end);
|
||||
}
|
||||
|
||||
// If the start offset was provided, we use and check based on that.
|
||||
// If not, we rely on the offset calculated from the goodbye table entry.
|
||||
let start = start.unwrap_or(end - tail.offset - tail.size);
|
||||
let goodbye_table_size = tail.size;
|
||||
if goodbye_table_size < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
|
||||
bail!("short goodbye table size for object [{}..{}]", start, end);
|
||||
}
|
||||
|
||||
let goodbye_inner_size = goodbye_table_size - HEADER_SIZE - GOODBYE_ITEM_SIZE;
|
||||
if (goodbye_inner_size % GOODBYE_ITEM_SIZE) != 0 {
|
||||
bail!(
|
||||
"wrong goodbye inner table size for entry [{}..{}]",
|
||||
start,
|
||||
end
|
||||
);
|
||||
}
|
||||
|
||||
let goodbye_start = end - goodbye_table_size;
|
||||
if tail.offset != (goodbye_start - start) {
|
||||
bail!(
|
||||
"wrong offset in goodbye tail marker for entry [{}..{}]",
|
||||
start,
|
||||
end
|
||||
);
|
||||
}
|
||||
|
||||
self.seek(SeekFrom::Start(goodbye_start))?;
|
||||
let head: PxarHeader = self.inner.read_item()?;
|
||||
if head.htype != PXAR_GOODBYE {
|
||||
bail!(
|
||||
"wrong goodbye table header type for entry [{}..{}]",
|
||||
start,
|
||||
end
|
||||
);
|
||||
}
|
||||
|
||||
if head.size != goodbye_table_size {
|
||||
bail!("wrong goodbye table size for entry [{}..{}]", start, end);
|
||||
}
|
||||
|
||||
let mut gb_entries = Vec::new();
|
||||
for i in 0..goodbye_inner_size / GOODBYE_ITEM_SIZE {
|
||||
let item: PxarGoodbyeItem = self.inner.read_item()?;
|
||||
if item.offset > (goodbye_start - start) {
|
||||
bail!(
|
||||
"goodbye entry {} offset out of range [{}..{}] {} {} {}",
|
||||
i,
|
||||
start,
|
||||
end,
|
||||
item.offset,
|
||||
goodbye_start,
|
||||
start
|
||||
);
|
||||
}
|
||||
let item_start = goodbye_start - item.offset;
|
||||
let item_end = item_start + item.size;
|
||||
if item_end > goodbye_start {
|
||||
bail!("goodbye entry {} end out of range [{}..{}]", i, start, end);
|
||||
}
|
||||
gb_entries.push((item, item_start, item_end));
|
||||
}
|
||||
|
||||
Ok(gb_entries)
|
||||
}
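The offsets stored in a goodbye item are relative to the start of the goodbye object and point backwards, which is what the range checks above rely on. A minimal standalone sketch of that arithmetic (the struct only mirrors the PxarGoodbyeItem fields used here; the helper and values are illustrative, not part of the archive code):

struct GoodbyeItem { hash: u64, offset: u64, size: u64 }

// Map one goodbye item to the absolute [start..end) range of the entry it references.
fn item_range(goodbye_start: u64, item: &GoodbyeItem) -> Option<(u64, u64)> {
    // `offset` points backwards from the start of the goodbye object.
    let item_start = goodbye_start.checked_sub(item.offset)?;
    let item_end = item_start + item.size;
    // A referenced entry may never extend into the goodbye object itself.
    if item_end > goodbye_start {
        return None;
    }
    Some((item_start, item_end))
}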
|
||||
|
||||
pub fn list_dir(&mut self, dir: &DirectoryEntry) -> Result<Vec<DirectoryEntry>, Error> {
|
||||
let start = dir.start;
|
||||
let end = dir.end;
|
||||
|
||||
//println!("list_dir1: {} {}", start, end);
|
||||
|
||||
if (end - start) < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
|
||||
bail!("detected short object [{}..{}]", start, end);
|
||||
}
|
||||
|
||||
let mut result = vec![];
|
||||
let goodbye_table = self.goodbye_table(Some(start), end)?;
|
||||
for (_, item_start, item_end) in goodbye_table {
|
||||
let entry = self.read_directory_entry(item_start, item_end)?;
|
||||
//println!("ENTRY: {} {} {:?}", item_start, item_end, entry.filename);
|
||||
result.push(entry);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn print_filenames<W: std::io::Write>(
|
||||
&mut self,
|
||||
output: &mut W,
|
||||
prefix: &mut PathBuf,
|
||||
dir: &DirectoryEntry,
|
||||
) -> Result<(), Error> {
|
||||
let mut list = self.list_dir(dir)?;
|
||||
|
||||
list.sort_unstable_by(|a, b| a.filename.cmp(&b.filename));
|
||||
|
||||
for item in &list {
|
||||
prefix.push(item.filename.clone());
|
||||
|
||||
let mode = item.entry.mode as u32;
|
||||
|
||||
let ifmt = mode & libc::S_IFMT;
|
||||
|
||||
writeln!(output, "{:?}", prefix)?;
|
||||
|
||||
match ifmt {
|
||||
libc::S_IFDIR => self.print_filenames(output, prefix, item)?,
|
||||
libc::S_IFREG | libc::S_IFLNK | libc::S_IFBLK | libc::S_IFCHR => {}
|
||||
_ => bail!("unknown item mode/type for {:?}", prefix),
|
||||
}
|
||||
|
||||
prefix.pop();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Lookup the item identified by `filename` in the provided `DirectoryEntry`.
|
||||
///
|
||||
/// Calculates the hash of the filename and searches for matching entries in
|
||||
/// the goodbye table of the provided `DirectoryEntry`.
|
||||
/// If found, the filename is also compared to guard against hash collisions.
|
||||
/// If the filename does not match, the search resumes with the next entry in
|
||||
/// the goodbye table.
|
||||
/// If there is no entry with matching `filename`, `Ok(None)` is returned.
|
||||
pub fn lookup(
|
||||
&mut self,
|
||||
dir: &DirectoryEntry,
|
||||
filename: &OsStr,
|
||||
) -> Result<Option<DirectoryEntry>, Error> {
|
||||
let gbt = self.goodbye_table(Some(dir.start), dir.end)?;
|
||||
let hash = compute_goodbye_hash(filename.as_bytes());
|
||||
|
||||
let mut start_idx = 0;
|
||||
let mut skip_multiple = 0;
|
||||
loop {
|
||||
// Search for the next goodbye entry with matching hash.
|
||||
let idx = search_binary_tree_by(
|
||||
start_idx,
|
||||
gbt.len(),
|
||||
skip_multiple,
|
||||
|idx| hash.cmp(&gbt[idx].0.hash),
|
||||
);
|
||||
let (_item, start, end) = match idx {
|
||||
Some(idx) => &gbt[idx],
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
let entry = self.read_directory_entry(*start, *end)?;
|
||||
|
||||
// Possible hash collision, need to check if the found entry is indeed
|
||||
// the filename to lookup.
|
||||
if entry.filename == filename {
|
||||
return Ok(Some(entry));
|
||||
}
|
||||
// Hash collision, check the next entry in the goodbye table by starting
|
||||
// from the given index but skipping one more match (i.e. the hash at the index itself).
|
||||
start_idx = idx.unwrap();
|
||||
skip_multiple = 1;
|
||||
}
|
||||
}
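The collision handling above first matches on the SipHash value and only accepts an entry once the filename itself compares equal; on a mismatch it keeps searching. The real goodbye table is laid out as a binary search tree and walked with search_binary_tree_by, but the same idea can be sketched over a plain hash-sorted slice (the function and names below are invented for illustration):

fn lookup_by_hash<'a>(table: &'a [(u64, &'a str)], hash: u64, name: &str) -> Option<&'a str> {
    // `table` is sorted by hash, so entries with equal hashes are adjacent.
    let mut idx = table.partition_point(|(h, _)| *h < hash);
    while idx < table.len() && table[idx].0 == hash {
        // Possible hash collision: only a matching filename counts as found.
        if table[idx].1 == name {
            return Some(table[idx].1);
        }
        idx += 1;
    }
    None
}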
|
||||
|
||||
/// Read the payload of the file given by `entry`.
|
||||
///
|
||||
/// This will read a file's payload as raw bytes starting from `offset` after
|
||||
/// the payload marker, reading `size` bytes.
|
||||
/// If the payload from `offset` to EOF is smaller than `size` bytes, the
|
||||
/// buffer with reduced size is returned.
|
||||
/// If `offset` is larger than the payload size of the `DirectoryEntry`, an
|
||||
/// empty buffer is returned.
|
||||
pub fn read(&mut self, entry: &DirectoryEntry, size: usize, offset: u64) -> Result<Vec<u8>, Error> {
|
||||
let start_offset = entry.payload_offset
|
||||
.ok_or_else(|| format_err!("entry has no payload offset"))?;
|
||||
if offset >= entry.size {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let len = if u64::try_from(size)? > entry.size {
|
||||
usize::try_from(entry.size)?
|
||||
} else {
|
||||
size
|
||||
};
|
||||
self.seek(SeekFrom::Start(start_offset + offset))?;
|
||||
let data = self.inner.get_reader_mut().read_exact_allocated(len)?;
|
||||
|
||||
Ok(data)
|
||||
}
|
||||
}
|
@@ -2,117 +2,149 @@ use std::ffi::{OsStr, OsString};
|
||||
use std::os::unix::io::{AsRawFd, RawFd};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use nix::errno::Errno;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
use nix::NixPath;
|
||||
use nix::sys::stat::{mkdirat, Mode};
|
||||
|
||||
use super::format_definition::{PxarAttributes, PxarEntry};
|
||||
use proxmox::sys::error::SysError;
|
||||
use pxar::Metadata;
|
||||
|
||||
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
|
||||
|
||||
pub struct PxarDir {
|
||||
pub filename: OsString,
|
||||
pub entry: PxarEntry,
|
||||
pub attr: PxarAttributes,
|
||||
pub dir: Option<nix::dir::Dir>,
|
||||
}
|
||||
|
||||
pub struct PxarDirStack {
|
||||
root: RawFd,
|
||||
data: Vec<PxarDir>,
|
||||
file_name: OsString,
|
||||
metadata: Metadata,
|
||||
dir: Option<Dir>,
|
||||
}
|
||||
|
||||
impl PxarDir {
|
||||
pub fn new(filename: &OsStr, entry: PxarEntry, attr: PxarAttributes) -> Self {
|
||||
pub fn new(file_name: OsString, metadata: Metadata) -> Self {
|
||||
Self {
|
||||
filename: filename.to_os_string(),
|
||||
entry,
|
||||
attr,
|
||||
file_name,
|
||||
metadata,
|
||||
dir: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn create_dir(&self, parent: RawFd, create_new: bool) -> Result<nix::dir::Dir, nix::Error> {
|
||||
let res = self
|
||||
.filename
|
||||
.with_nix_path(|cstr| unsafe { libc::mkdirat(parent, cstr.as_ptr(), libc::S_IRWXU) })?;
|
||||
|
||||
match Errno::result(res) {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
if err == nix::Error::Sys(nix::errno::Errno::EEXIST) {
|
||||
if create_new {
|
||||
return Err(err);
|
||||
}
|
||||
} else {
|
||||
return Err(err);
|
||||
}
|
||||
pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
|
||||
Self {
|
||||
file_name: OsString::from("."),
|
||||
metadata,
|
||||
dir: Some(dir),
|
||||
}
|
||||
}
|
||||
|
||||
let dir = nix::dir::Dir::openat(
|
||||
fn create_dir(&mut self, parent: RawFd, allow_existing_dirs: bool) -> Result<RawFd, Error> {
|
||||
match mkdirat(
|
||||
parent,
|
||||
self.filename.as_os_str(),
|
||||
self.file_name.as_os_str(),
|
||||
perms_from_metadata(&self.metadata)?,
|
||||
) {
|
||||
Ok(()) => (),
|
||||
Err(err) => {
|
||||
if !(allow_existing_dirs && err.already_exists()) {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.open_dir(parent)
|
||||
}
|
||||
|
||||
fn open_dir(&mut self, parent: RawFd) -> Result<RawFd, Error> {
|
||||
let dir = Dir::openat(
|
||||
parent,
|
||||
self.file_name.as_os_str(),
|
||||
OFlag::O_DIRECTORY,
|
||||
Mode::empty(),
|
||||
)?;
|
||||
|
||||
Ok(dir)
|
||||
let fd = dir.as_raw_fd();
|
||||
self.dir = Some(dir);
|
||||
|
||||
Ok(fd)
|
||||
}
|
||||
|
||||
pub fn try_as_raw_fd(&self) -> Option<RawFd> {
|
||||
self.dir.as_ref().map(AsRawFd::as_raw_fd)
|
||||
}
|
||||
|
||||
pub fn metadata(&self) -> &Metadata {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
pub fn file_name(&self) -> &OsStr {
|
||||
&self.file_name
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PxarDirStack {
|
||||
dirs: Vec<PxarDir>,
|
||||
path: PathBuf,
|
||||
created: usize,
|
||||
}
|
||||
|
||||
impl PxarDirStack {
|
||||
pub fn new(parent: RawFd) -> Self {
|
||||
pub fn new(root: Dir, metadata: Metadata) -> Self {
|
||||
Self {
|
||||
root: parent,
|
||||
data: Vec::new(),
|
||||
dirs: vec![PxarDir::with_dir(root, metadata)],
|
||||
path: PathBuf::from("/"),
|
||||
created: 1, // the root directory exists
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push(&mut self, dir: PxarDir) {
|
||||
self.data.push(dir);
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.dirs.is_empty()
|
||||
}
|
||||
|
||||
pub fn pop(&mut self) -> Option<PxarDir> {
|
||||
self.data.pop()
|
||||
pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
|
||||
assert_single_path_component(&file_name)?;
|
||||
self.path.push(&file_name);
|
||||
self.dirs.push(PxarDir::new(file_name, metadata));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn as_path_buf(&self) -> PathBuf {
|
||||
let path: PathBuf = self.data.iter().map(|d| d.filename.clone()).collect();
|
||||
path
|
||||
pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
|
||||
let out = self.dirs.pop();
|
||||
if !self.path.pop() {
|
||||
if self.path.as_os_str() == "/" {
|
||||
// we just finished the root directory, make sure this can only happen once:
|
||||
self.path = PathBuf::new();
|
||||
} else {
|
||||
bail!("lost track of path");
|
||||
}
|
||||
}
|
||||
self.created = self.created.min(self.dirs.len());
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn last(&self) -> Option<&PxarDir> {
|
||||
self.data.last()
|
||||
pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<RawFd, Error> {
|
||||
// should not be possible given the way we use it:
|
||||
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
|
||||
|
||||
let mut fd = self.dirs[self.created - 1]
|
||||
.try_as_raw_fd()
|
||||
.ok_or_else(|| format_err!("lost track of directory file descriptors"))?;
|
||||
while self.created < self.dirs.len() {
|
||||
fd = self.dirs[self.created].create_dir(fd, allow_existing_dirs)?;
|
||||
self.created += 1;
|
||||
}
|
||||
|
||||
pub fn last_mut(&mut self) -> Option<&mut PxarDir> {
|
||||
self.data.last_mut()
|
||||
Ok(fd)
|
||||
}
|
||||
|
||||
pub fn last_dir_fd(&self) -> Option<RawFd> {
|
||||
let last_dir = self.data.last()?;
|
||||
match &last_dir.dir {
|
||||
Some(d) => Some(d.as_raw_fd()),
|
||||
None => None,
|
||||
}
|
||||
pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
|
||||
let _: RawFd = self.last_dir_fd(allow_existing_dirs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_all_dirs(&mut self, create_new: bool) -> Result<RawFd, Error> {
|
||||
let mut current_fd = self.root;
|
||||
for d in &mut self.data {
|
||||
match &d.dir {
|
||||
Some(dir) => current_fd = dir.as_raw_fd(),
|
||||
None => {
|
||||
let dir = d
|
||||
.create_dir(current_fd, create_new)
|
||||
.map_err(|err| format_err!("create dir failed - {}", err))?;
|
||||
current_fd = dir.as_raw_fd();
|
||||
d.dir = Some(dir);
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn root_dir_fd(&self) -> Result<RawFd, Error> {
|
||||
// should not be possible given the way we use it:
|
||||
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
|
||||
|
||||
Ok(current_fd)
|
||||
self.dirs[0]
|
||||
.try_as_raw_fd()
|
||||
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
|
||||
}
|
||||
}
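The new stack creates directories lazily: `created` counts how many of the stacked entries already exist on disk, and last_dir_fd materializes the missing ones on demand. The bookkeeping in isolation, as a simplified standalone sketch (hypothetical type, plain strings instead of nix::dir::Dir handles and metadata):

struct LazyDirStack {
    names: Vec<String>,
    created: usize, // how many entries of `names` already exist on disk
}

impl LazyDirStack {
    fn push(&mut self, name: String) {
        self.names.push(name);
    }

    fn pop(&mut self) -> Option<String> {
        let out = self.names.pop();
        // Never claim more directories exist than are still on the stack.
        self.created = self.created.min(self.names.len());
        out
    }

    // Create every directory that was pushed but not yet materialized on disk.
    fn ensure_created(&mut self, mut mkdir: impl FnMut(&str)) {
        while self.created < self.names.len() {
            mkdir(&self.names[self.created]);
            self.created += 1;
        }
    }
}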
|
||||
|
src/pxar/encoder.rs (1332 lines changed): file diff suppressed because it is too large
src/pxar/extract.rs (new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
//! Code for extraction of pxar contents onto the file system.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CStr, CString, OsStr, OsString};
|
||||
use std::io;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchList, MatchType};
|
||||
use pxar::format::Device;
|
||||
use pxar::Metadata;
|
||||
|
||||
use proxmox::c_result;
|
||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
|
||||
use crate::pxar::dir_stack::PxarDirStack;
|
||||
use crate::pxar::Flags;
|
||||
use crate::pxar::metadata;
|
||||
|
||||
pub fn extract_archive<T, F>(
|
||||
mut decoder: pxar::decoder::Decoder<T>,
|
||||
destination: &Path,
|
||||
match_list: &[MatchEntry],
|
||||
feature_flags: Flags,
|
||||
allow_existing_dirs: bool,
|
||||
mut callback: F,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: pxar::decoder::SeqRead,
|
||||
F: FnMut(&Path),
|
||||
{
|
||||
// we use this to keep track of our directory-traversal
|
||||
decoder.enable_goodbye_entries(true);
|
||||
|
||||
let root = decoder
|
||||
.next()
|
||||
.ok_or_else(|| format_err!("found empty pxar archive"))?
|
||||
.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
|
||||
|
||||
if !root.is_dir() {
|
||||
bail!("pxar archive does not start with a directory entry!");
|
||||
}
|
||||
|
||||
create_path(
|
||||
&destination,
|
||||
None,
|
||||
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
|
||||
)
|
||||
.map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
|
||||
|
||||
let dir = Dir::open(
|
||||
destination,
|
||||
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
|
||||
Mode::empty(),
|
||||
)
|
||||
.map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
|
||||
|
||||
let mut extractor = Extractor::new(
|
||||
dir,
|
||||
root.metadata().clone(),
|
||||
allow_existing_dirs,
|
||||
feature_flags,
|
||||
);
|
||||
|
||||
let mut match_stack = Vec::new();
|
||||
let mut current_match = true;
|
||||
while let Some(entry) = decoder.next() {
|
||||
use pxar::EntryKind;
|
||||
|
||||
let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
|
||||
|
||||
let file_name_os = entry.file_name();
|
||||
|
||||
// safety check: a file entry in an archive must never contain slashes:
|
||||
if file_name_os.as_bytes().contains(&b'/') {
|
||||
bail!("archive file entry contains slashes, which is invalid and a security concern");
|
||||
}
|
||||
|
||||
let file_name = CString::new(file_name_os.as_bytes())
|
||||
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
|
||||
|
||||
let metadata = entry.metadata();
|
||||
|
||||
let match_result = match_list.matches(
|
||||
entry.path().as_os_str().as_bytes(),
|
||||
Some(metadata.file_type() as u32),
|
||||
);
|
||||
|
||||
let did_match = match match_result {
|
||||
Some(MatchType::Include) => true,
|
||||
Some(MatchType::Exclude) => false,
|
||||
None => current_match,
|
||||
};
|
||||
match (did_match, entry.kind()) {
|
||||
(_, EntryKind::Directory) => {
|
||||
callback(entry.path());
|
||||
|
||||
let create = current_match && match_result != Some(MatchType::Exclude);
|
||||
extractor.enter_directory(file_name_os.to_owned(), metadata.clone(), create)?;
|
||||
|
||||
// We're starting a new directory, push our old matching state and replace it with
|
||||
// our new one:
|
||||
match_stack.push(current_match);
|
||||
current_match = did_match;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
(_, EntryKind::GoodbyeTable) => {
|
||||
// go up a directory
|
||||
extractor
|
||||
.leave_directory()
|
||||
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
|
||||
|
||||
// We left a directory, also get back our previous matching state. This is in sync
|
||||
// with `dir_stack` so this should never be empty except for the final goodbye
|
||||
// table, in which case we get back to the default of `true`.
|
||||
current_match = match_stack.pop().unwrap_or(true);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
(true, EntryKind::Symlink(link)) => {
|
||||
callback(entry.path());
|
||||
extractor.extract_symlink(&file_name, metadata, link.as_ref())
|
||||
}
|
||||
(true, EntryKind::Hardlink(link)) => {
|
||||
callback(entry.path());
|
||||
extractor.extract_hardlink(&file_name, link.as_os_str())
|
||||
}
|
||||
(true, EntryKind::Device(dev)) => {
|
||||
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
|
||||
callback(entry.path());
|
||||
extractor.extract_device(&file_name, metadata, dev)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
(true, EntryKind::Fifo) => {
|
||||
if extractor.contains_flags(Flags::WITH_FIFOS) {
|
||||
callback(entry.path());
|
||||
extractor.extract_special(&file_name, metadata, 0)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
(true, EntryKind::Socket) => {
|
||||
if extractor.contains_flags(Flags::WITH_SOCKETS) {
|
||||
callback(entry.path());
|
||||
extractor.extract_special(&file_name, metadata, 0)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
(true, EntryKind::File { size, .. }) => extractor.extract_file(
|
||||
&file_name,
|
||||
metadata,
|
||||
*size,
|
||||
&mut decoder.contents().ok_or_else(|| {
|
||||
format_err!("found regular file entry without contents in archive")
|
||||
})?,
|
||||
),
|
||||
(false, _) => Ok(()), // skip this
|
||||
}
|
||||
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
|
||||
}
|
||||
|
||||
if !extractor.dir_stack.is_empty() {
|
||||
bail!("unexpected eof while decoding pxar archive");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
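The include/exclude decision is scoped per directory: the current state is pushed when a directory entry is read and restored when its goodbye table closes it, defaulting back to true at the root. A compact standalone sketch of just that bookkeeping (the Event enum and walk function are invented for illustration; the real code gets these events from the pxar decoder and the match results from pathpatterns):

enum Event<'a> {
    EnterDir(Option<bool>),       // Some(true/false) if the match list decided, None otherwise
    Leave,                        // goodbye table closing the current directory
    File(&'a str, Option<bool>),
}

fn walk(events: &[Event<'_>]) -> Vec<String> {
    let mut included = Vec::new();
    let mut match_stack = Vec::new();
    let mut current_match = true;
    for ev in events {
        match ev {
            Event::EnterDir(m) => {
                let did_match = m.unwrap_or(current_match);
                match_stack.push(current_match);
                current_match = did_match;
            }
            Event::Leave => {
                // Restore the parent directory's state; default to true for the final goodbye table.
                current_match = match_stack.pop().unwrap_or(true);
            }
            Event::File(name, m) => {
                if m.unwrap_or(current_match) {
                    included.push(name.to_string());
                }
            }
        }
    }
    included
}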
|
||||
|
||||
/// Common state for file extraction.
|
||||
pub(crate) struct Extractor {
|
||||
feature_flags: Flags,
|
||||
allow_existing_dirs: bool,
|
||||
dir_stack: PxarDirStack,
|
||||
}
|
||||
|
||||
impl Extractor {
|
||||
/// Create a new extractor state for a target directory.
|
||||
pub fn new(
|
||||
root_dir: Dir,
|
||||
metadata: Metadata,
|
||||
allow_existing_dirs: bool,
|
||||
feature_flags: Flags,
|
||||
) -> Self {
|
||||
Self {
|
||||
dir_stack: PxarDirStack::new(root_dir, metadata),
|
||||
allow_existing_dirs,
|
||||
feature_flags,
|
||||
}
|
||||
}
|
||||
|
||||
/// When encountering a directory during extraction, this is used to keep track of it. If
|
||||
/// `create` is true it is immediately created and its metadata will be updated once we leave
|
||||
/// it. If `create` is false it will only be created if it is going to have any actual content.
|
||||
pub fn enter_directory(
|
||||
&mut self,
|
||||
file_name: OsString,
|
||||
metadata: Metadata,
|
||||
create: bool,
|
||||
) -> Result<(), Error> {
|
||||
self.dir_stack.push(file_name, metadata)?;
|
||||
|
||||
if create {
|
||||
self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// When done with a directory we need to make sure its metadata gets applied before moving on.
|
||||
pub fn leave_directory(&mut self) -> Result<(), Error> {
|
||||
let dir = self
|
||||
.dir_stack
|
||||
.pop()
|
||||
.map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
|
||||
.ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
|
||||
|
||||
if let Some(fd) = dir.try_as_raw_fd() {
|
||||
metadata::apply(
|
||||
self.feature_flags,
|
||||
dir.metadata(),
|
||||
fd,
|
||||
&CString::new(dir.file_name().as_bytes())?,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn contains_flags(&self, flag: Flags) -> bool {
|
||||
self.feature_flags.contains(flag)
|
||||
}
|
||||
|
||||
fn parent_fd(&mut self) -> Result<RawFd, Error> {
|
||||
self.dir_stack.last_dir_fd(self.allow_existing_dirs)
|
||||
}
|
||||
|
||||
pub fn extract_symlink(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
link: &OsStr,
|
||||
) -> Result<(), Error> {
|
||||
let parent = self.parent_fd()?;
|
||||
nix::unistd::symlinkat(link, Some(parent), file_name)?;
|
||||
metadata::apply_at(self.feature_flags, metadata, parent, file_name)
|
||||
}
|
||||
|
||||
pub fn extract_hardlink(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
link: &OsStr,
|
||||
) -> Result<(), Error> {
|
||||
crate::pxar::tools::assert_relative_path(link)?;
|
||||
|
||||
let parent = self.parent_fd()?;
|
||||
let root = self.dir_stack.root_dir_fd()?;
|
||||
let target = CString::new(link.as_bytes())?;
|
||||
nix::unistd::linkat(
|
||||
Some(root),
|
||||
target.as_c_str(),
|
||||
Some(parent),
|
||||
file_name,
|
||||
nix::unistd::LinkatFlags::NoSymlinkFollow,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_device(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
device: &Device,
|
||||
) -> Result<(), Error> {
|
||||
self.extract_special(file_name, metadata, device.to_dev_t())
|
||||
}
|
||||
|
||||
pub fn extract_special(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
device: libc::dev_t,
|
||||
) -> Result<(), Error> {
|
||||
let mode = metadata.stat.mode;
|
||||
let mode = u32::try_from(mode).map_err(|_| {
|
||||
format_err!(
|
||||
"device node's mode contains illegal bits: 0x{:x} (0o{:o})",
|
||||
mode,
|
||||
mode,
|
||||
)
|
||||
})?;
|
||||
let parent = self.parent_fd()?;
|
||||
unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
|
||||
.map_err(|err| format_err!("failed to create device node: {}", err))?;
|
||||
|
||||
metadata::apply_at(self.feature_flags, metadata, parent, file_name)
|
||||
}
|
||||
|
||||
pub fn extract_file(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
size: u64,
|
||||
contents: &mut dyn io::Read,
|
||||
) -> Result<(), Error> {
|
||||
let parent = self.parent_fd()?;
|
||||
let mut file = unsafe {
|
||||
std::fs::File::from_raw_fd(nix::fcntl::openat(
|
||||
parent,
|
||||
file_name,
|
||||
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
|
||||
Mode::from_bits(0o600).unwrap(),
|
||||
)?)
|
||||
};
|
||||
|
||||
let extracted = io::copy(&mut *contents, &mut file)?;
|
||||
if size != extracted {
|
||||
bail!("extracted {} bytes of a file of {} bytes", extracted, size);
|
||||
}
|
||||
|
||||
metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
|
||||
}
|
||||
|
||||
pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
|
||||
&mut self,
|
||||
file_name: &CStr,
|
||||
metadata: &Metadata,
|
||||
size: u64,
|
||||
contents: &mut T,
|
||||
) -> Result<(), Error> {
|
||||
let parent = self.parent_fd()?;
|
||||
let mut file = tokio::fs::File::from_std(unsafe {
|
||||
std::fs::File::from_raw_fd(nix::fcntl::openat(
|
||||
parent,
|
||||
file_name,
|
||||
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
|
||||
Mode::from_bits(0o600).unwrap(),
|
||||
)?)
|
||||
});
|
||||
|
||||
let extracted = tokio::io::copy(&mut *contents, &mut file).await?;
|
||||
if size != extracted {
|
||||
bail!("extracted {} bytes of a file of {} bytes", extracted, size);
|
||||
}
|
||||
|
||||
metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
|
||||
}
|
||||
}
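Both extract_file variants open the target with O_CREAT | O_EXCL, stream the payload into it and then verify the byte count against the size recorded in the archive. The verification step on its own, as a hypothetical std-only helper (not part of this diff):

use std::io::{self, Read, Write};

fn copy_and_verify(mut from: impl Read, mut to: impl Write, expected: u64) -> io::Result<()> {
    let copied = io::copy(&mut from, &mut to)?;
    if copied != expected {
        return Err(io::Error::new(
            io::ErrorKind::UnexpectedEof,
            format!("extracted {} bytes of a file of {} bytes", copied, expected),
        ));
    }
    Ok(())
}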
|
@@ -3,315 +3,332 @@
|
||||
//! Flags for known supported features for a given filesystem can be derived
|
||||
//! from the superblocks magic number.
|
||||
|
||||
/// FAT-style 2s time granularity
|
||||
pub const WITH_2SEC_TIME: u64 = 0x40;
|
||||
/// Preserve read only flag of files
|
||||
pub const WITH_READ_ONLY: u64 = 0x80;
|
||||
/// Preserve unix permissions
|
||||
pub const WITH_PERMISSIONS: u64 = 0x100;
|
||||
/// Include symbolic links
|
||||
pub const WITH_SYMLINKS: u64 = 0x200;
|
||||
/// Include device nodes
|
||||
pub const WITH_DEVICE_NODES: u64 = 0x400;
|
||||
/// Include FIFOs
|
||||
pub const WITH_FIFOS: u64 = 0x800;
|
||||
/// Include Sockets
|
||||
pub const WITH_SOCKETS: u64 = 0x1000;
|
||||
use bitflags::bitflags;
|
||||
|
||||
/// Preserve DOS file flag `HIDDEN`
|
||||
pub const WITH_FLAG_HIDDEN: u64 = 0x2000;
|
||||
/// Preserve DOS file flag `SYSTEM`
|
||||
pub const WITH_FLAG_SYSTEM: u64 = 0x4000;
|
||||
/// Preserve DOS file flag `ARCHIVE`
|
||||
pub const WITH_FLAG_ARCHIVE: u64 = 0x8000;
|
||||
bitflags! {
|
||||
pub struct Flags: u64 {
|
||||
/// FAT-style 2s time granularity
|
||||
const WITH_2SEC_TIME = 0x40;
|
||||
/// Preserve read only flag of files
|
||||
const WITH_READ_ONLY = 0x80;
|
||||
/// Preserve unix permissions
|
||||
const WITH_PERMISSIONS = 0x100;
|
||||
/// Include symbolic links
|
||||
const WITH_SYMLINKS = 0x200;
|
||||
/// Include device nodes
|
||||
const WITH_DEVICE_NODES = 0x400;
|
||||
/// Include FIFOs
|
||||
const WITH_FIFOS = 0x800;
|
||||
/// Include Sockets
|
||||
const WITH_SOCKETS = 0x1000;
|
||||
|
||||
// chattr() flags
|
||||
/// Linux file attribute `APPEND`
|
||||
pub const WITH_FLAG_APPEND: u64 = 0x10000;
|
||||
/// Linux file attribute `NOATIME`
|
||||
pub const WITH_FLAG_NOATIME: u64 = 0x20000;
|
||||
/// Linux file attribute `COMPR`
|
||||
pub const WITH_FLAG_COMPR: u64 = 0x40000;
|
||||
/// Linux file attribute `NOCOW`
|
||||
pub const WITH_FLAG_NOCOW: u64 = 0x80000;
|
||||
/// Linux file attribute `NODUMP`
|
||||
pub const WITH_FLAG_NODUMP: u64 = 0x0010_0000;
|
||||
/// Linux file attribute `DIRSYNC`
|
||||
pub const WITH_FLAG_DIRSYNC: u64 = 0x0020_0000;
|
||||
/// Linux file attribute `IMMUTABLE`
|
||||
pub const WITH_FLAG_IMMUTABLE: u64 = 0x0040_0000;
|
||||
/// Linux file attribute `SYNC`
|
||||
pub const WITH_FLAG_SYNC: u64 = 0x0080_0000;
|
||||
/// Linux file attribute `NOCOMP`
|
||||
pub const WITH_FLAG_NOCOMP: u64 = 0x0100_0000;
|
||||
/// Linux file attribute `PROJINHERIT`
|
||||
pub const WITH_FLAG_PROJINHERIT: u64 = 0x0200_0000;
|
||||
/// Preserve DOS file flag `HIDDEN`
|
||||
const WITH_FLAG_HIDDEN = 0x2000;
|
||||
/// Preserve DOS file flag `SYSTEM`
|
||||
const WITH_FLAG_SYSTEM = 0x4000;
|
||||
/// Preserve DOS file flag `ARCHIVE`
|
||||
const WITH_FLAG_ARCHIVE = 0x8000;
|
||||
|
||||
// chattr() flags
|
||||
/// Linux file attribute `APPEND`
|
||||
const WITH_FLAG_APPEND = 0x10000;
|
||||
/// Linux file attribute `NOATIME`
|
||||
const WITH_FLAG_NOATIME = 0x20000;
|
||||
/// Linux file attribute `COMPR`
|
||||
const WITH_FLAG_COMPR = 0x40000;
|
||||
/// Linux file attribute `NOCOW`
|
||||
const WITH_FLAG_NOCOW = 0x80000;
|
||||
/// Linux file attribute `NODUMP`
|
||||
const WITH_FLAG_NODUMP = 0x0010_0000;
|
||||
/// Linux file attribute `DIRSYNC`
|
||||
const WITH_FLAG_DIRSYNC = 0x0020_0000;
|
||||
/// Linux file attribute `IMMUTABLE`
|
||||
const WITH_FLAG_IMMUTABLE = 0x0040_0000;
|
||||
/// Linux file attribute `SYNC`
|
||||
const WITH_FLAG_SYNC = 0x0080_0000;
|
||||
/// Linux file attribute `NOCOMP`
|
||||
const WITH_FLAG_NOCOMP = 0x0100_0000;
|
||||
/// Linux file attribute `PROJINHERIT`
|
||||
const WITH_FLAG_PROJINHERIT = 0x0200_0000;
|
||||
|
||||
|
||||
/// Preserve BTRFS subvolume flag
|
||||
pub const WITH_SUBVOLUME: u64 = 0x0400_0000;
|
||||
/// Preserve BTRFS read-only subvolume flag
|
||||
pub const WITH_SUBVOLUME_RO: u64 = 0x0800_0000;
|
||||
/// Preserve BTRFS subvolume flag
|
||||
const WITH_SUBVOLUME = 0x0400_0000;
|
||||
/// Preserve BTRFS read-only subvolume flag
|
||||
const WITH_SUBVOLUME_RO = 0x0800_0000;
|
||||
|
||||
/// Preserve Extended Attribute metadata
|
||||
pub const WITH_XATTRS: u64 = 0x1000_0000;
|
||||
/// Preserve Access Control List metadata
|
||||
pub const WITH_ACL: u64 = 0x2000_0000;
|
||||
/// Preserve SELinux security context
|
||||
pub const WITH_SELINUX: u64 = 0x4000_0000;
|
||||
/// Preserve "security.capability" xattr
|
||||
pub const WITH_FCAPS: u64 = 0x8000_0000;
|
||||
/// Preserve Extended Attribute metadata
|
||||
const WITH_XATTRS = 0x1000_0000;
|
||||
/// Preserve Access Control List metadata
|
||||
const WITH_ACL = 0x2000_0000;
|
||||
/// Preserve SELinux security context
|
||||
const WITH_SELINUX = 0x4000_0000;
|
||||
/// Preserve "security.capability" xattr
|
||||
const WITH_FCAPS = 0x8000_0000;
|
||||
|
||||
/// Preserve XFS/ext4/ZFS project quota ID
|
||||
pub const WITH_QUOTA_PROJID: u64 = 0x0001_0000_0000;
|
||||
/// Preserve XFS/ext4/ZFS project quota ID
|
||||
const WITH_QUOTA_PROJID = 0x0001_0000_0000;
|
||||
|
||||
/// Support ".pxarexclude" files
|
||||
pub const EXCLUDE_FILE: u64 = 0x1000_0000_0000_0000;
|
||||
/// Exclude submounts
|
||||
pub const EXCLUDE_SUBMOUNTS: u64 = 0x4000_0000_0000_0000;
|
||||
/// Exclude entries with chattr flag NODUMP
|
||||
pub const EXCLUDE_NODUMP: u64 = 0x8000_0000_0000_0000;
|
||||
/// Support ".pxarexclude" files
|
||||
const EXCLUDE_FILE = 0x1000_0000_0000_0000;
|
||||
/// Exclude submounts
|
||||
const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
|
||||
/// Exclude entries with chattr flag NODUMP
|
||||
const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;
|
||||
|
||||
/// Definitions of typical feature flags for the *pxar* encoder/decoder.
|
||||
/// This avoids expensive syscalls for unsupported features.
|
||||
// Definitions of typical feature flags for the *pxar* encoder/decoder.
|
||||
// This avoids expensive syscalls for unsupported features.
|
||||
|
||||
/// All chattr file attributes
|
||||
pub const WITH_CHATTR: u64 =
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_COMPR|
|
||||
WITH_FLAG_NOCOW|
|
||||
WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_DIRSYNC|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_FLAG_NOCOMP|
|
||||
WITH_FLAG_PROJINHERIT;
|
||||
/// All chattr file attributes
|
||||
const WITH_CHATTR =
|
||||
Flags::WITH_FLAG_APPEND.bits() |
|
||||
Flags::WITH_FLAG_NOATIME.bits() |
|
||||
Flags::WITH_FLAG_COMPR.bits() |
|
||||
Flags::WITH_FLAG_NOCOW.bits() |
|
||||
Flags::WITH_FLAG_NODUMP.bits() |
|
||||
Flags::WITH_FLAG_DIRSYNC.bits() |
|
||||
Flags::WITH_FLAG_IMMUTABLE.bits() |
|
||||
Flags::WITH_FLAG_SYNC.bits() |
|
||||
Flags::WITH_FLAG_NOCOMP.bits() |
|
||||
Flags::WITH_FLAG_PROJINHERIT.bits();
|
||||
|
||||
/// All FAT file attributes
|
||||
pub const WITH_FAT_ATTRS: u64 =
|
||||
WITH_FLAG_HIDDEN|
|
||||
WITH_FLAG_SYSTEM|
|
||||
WITH_FLAG_ARCHIVE;
|
||||
/// All FAT file attributes
|
||||
const WITH_FAT_ATTRS =
|
||||
Flags::WITH_FLAG_HIDDEN.bits() |
|
||||
Flags::WITH_FLAG_SYSTEM.bits() |
|
||||
Flags::WITH_FLAG_ARCHIVE.bits();
|
||||
|
||||
/// All bits that may also be exposed via fuse
|
||||
pub const WITH_FUSE: u64 =
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FAT_ATTRS|
|
||||
WITH_CHATTR|
|
||||
WITH_XATTRS;
|
||||
/// All bits that may also be exposed via fuse
|
||||
const WITH_FUSE =
|
||||
Flags::WITH_2SEC_TIME.bits() |
|
||||
Flags::WITH_READ_ONLY.bits() |
|
||||
Flags::WITH_PERMISSIONS.bits() |
|
||||
Flags::WITH_SYMLINKS.bits() |
|
||||
Flags::WITH_DEVICE_NODES.bits() |
|
||||
Flags::WITH_FIFOS.bits() |
|
||||
Flags::WITH_SOCKETS.bits() |
|
||||
Flags::WITH_FAT_ATTRS.bits() |
|
||||
Flags::WITH_CHATTR.bits() |
|
||||
Flags::WITH_XATTRS.bits();
|
||||
|
||||
|
||||
/// Default feature flags for encoder/decoder
|
||||
pub const DEFAULT: u64 =
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FLAG_HIDDEN|
|
||||
WITH_FLAG_SYSTEM|
|
||||
WITH_FLAG_ARCHIVE|
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_COMPR|
|
||||
WITH_FLAG_NOCOW|
|
||||
//WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_DIRSYNC|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_FLAG_NOCOMP|
|
||||
WITH_FLAG_PROJINHERIT|
|
||||
WITH_SUBVOLUME|
|
||||
WITH_SUBVOLUME_RO|
|
||||
WITH_XATTRS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX|
|
||||
WITH_FCAPS|
|
||||
WITH_QUOTA_PROJID|
|
||||
EXCLUDE_NODUMP|
|
||||
EXCLUDE_FILE;
|
||||
/// Default feature flags for encoder/decoder
|
||||
const DEFAULT =
|
||||
Flags::WITH_SYMLINKS.bits() |
|
||||
Flags::WITH_DEVICE_NODES.bits() |
|
||||
Flags::WITH_FIFOS.bits() |
|
||||
Flags::WITH_SOCKETS.bits() |
|
||||
Flags::WITH_FLAG_HIDDEN.bits() |
|
||||
Flags::WITH_FLAG_SYSTEM.bits() |
|
||||
Flags::WITH_FLAG_ARCHIVE.bits() |
|
||||
Flags::WITH_FLAG_APPEND.bits() |
|
||||
Flags::WITH_FLAG_NOATIME.bits() |
|
||||
Flags::WITH_FLAG_COMPR.bits() |
|
||||
Flags::WITH_FLAG_NOCOW.bits() |
|
||||
//WITH_FLAG_NODUMP.bits() |
|
||||
Flags::WITH_FLAG_DIRSYNC.bits() |
|
||||
Flags::WITH_FLAG_IMMUTABLE.bits() |
|
||||
Flags::WITH_FLAG_SYNC.bits() |
|
||||
Flags::WITH_FLAG_NOCOMP.bits() |
|
||||
Flags::WITH_FLAG_PROJINHERIT.bits() |
|
||||
Flags::WITH_SUBVOLUME.bits() |
|
||||
Flags::WITH_SUBVOLUME_RO.bits() |
|
||||
Flags::WITH_XATTRS.bits() |
|
||||
Flags::WITH_ACL.bits() |
|
||||
Flags::WITH_SELINUX.bits() |
|
||||
Flags::WITH_FCAPS.bits() |
|
||||
Flags::WITH_QUOTA_PROJID.bits() |
|
||||
Flags::EXCLUDE_NODUMP.bits() |
|
||||
Flags::EXCLUDE_FILE.bits();
|
||||
}
|
||||
}
|
||||
|
||||
// from /usr/include/linux/fs.h
|
||||
const FS_APPEND_FL: u32 = 0x0000_0020;
|
||||
const FS_NOATIME_FL: u32 = 0x0000_0080;
|
||||
const FS_COMPR_FL: u32 = 0x0000_0004;
|
||||
const FS_NOCOW_FL: u32 = 0x0080_0000;
|
||||
const FS_NODUMP_FL: u32 = 0x0000_0040;
|
||||
const FS_DIRSYNC_FL: u32 = 0x0001_0000;
|
||||
const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
|
||||
const FS_SYNC_FL: u32 = 0x0000_0008;
|
||||
const FS_NOCOMP_FL: u32 = 0x0000_0400;
|
||||
const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
|
||||
impl Default for Flags {
|
||||
fn default() -> Flags {
|
||||
Flags::DEFAULT
|
||||
}
|
||||
}
|
||||
|
||||
static CHATTR_MAP: [(u64, u32); 10] = [
|
||||
( WITH_FLAG_APPEND, FS_APPEND_FL ),
|
||||
( WITH_FLAG_NOATIME, FS_NOATIME_FL ),
|
||||
( WITH_FLAG_COMPR, FS_COMPR_FL ),
|
||||
( WITH_FLAG_NOCOW, FS_NOCOW_FL ),
|
||||
( WITH_FLAG_NODUMP, FS_NODUMP_FL ),
|
||||
( WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
|
||||
( WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
|
||||
( WITH_FLAG_SYNC, FS_SYNC_FL ),
|
||||
( WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
|
||||
( WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
|
||||
];
|
||||
impl Flags {
|
||||
/// Get a set of feature flags from file attributes.
|
||||
pub fn from_chattr(attr: u32) -> Flags {
|
||||
// from /usr/include/linux/fs.h
|
||||
const FS_APPEND_FL: u32 = 0x0000_0020;
|
||||
const FS_NOATIME_FL: u32 = 0x0000_0080;
|
||||
const FS_COMPR_FL: u32 = 0x0000_0004;
|
||||
const FS_NOCOW_FL: u32 = 0x0080_0000;
|
||||
const FS_NODUMP_FL: u32 = 0x0000_0040;
|
||||
const FS_DIRSYNC_FL: u32 = 0x0001_0000;
|
||||
const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
|
||||
const FS_SYNC_FL: u32 = 0x0000_0008;
|
||||
const FS_NOCOMP_FL: u32 = 0x0000_0400;
|
||||
const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
|
||||
|
||||
pub fn feature_flags_from_chattr(attr: u32) -> u64 {
|
||||
const CHATTR_MAP: [(Flags, u32); 10] = [
|
||||
( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
|
||||
( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
|
||||
( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
|
||||
( Flags::WITH_FLAG_NOCOW, FS_NOCOW_FL ),
|
||||
( Flags::WITH_FLAG_NODUMP, FS_NODUMP_FL ),
|
||||
( Flags::WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
|
||||
( Flags::WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
|
||||
( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
|
||||
( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
|
||||
( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
|
||||
];
|
||||
|
||||
let mut flags = 0u64;
|
||||
let mut flags = Flags::empty();
|
||||
|
||||
for (fe_flag, fs_flag) in &CHATTR_MAP {
|
||||
if (attr & fs_flag) != 0 { flags |= fe_flag; }
|
||||
if (attr & fs_flag) != 0 {
|
||||
flags |= *fe_flag;
|
||||
}
|
||||
}
|
||||
|
||||
flags
|
||||
}
|
||||
}
|
||||
|
||||
// from /usr/include/linux/msdos_fs.h
|
||||
const ATTR_HIDDEN: u32 = 2;
|
||||
const ATTR_SYS: u32 = 4;
|
||||
const ATTR_ARCH: u32 = 32;
|
||||
/// Get a set of feature flags from FAT attributes.
|
||||
pub fn from_fat_attr(attr: u32) -> Flags {
|
||||
// from /usr/include/linux/msdos_fs.h
|
||||
const ATTR_HIDDEN: u32 = 2;
|
||||
const ATTR_SYS: u32 = 4;
|
||||
const ATTR_ARCH: u32 = 32;
|
||||
|
||||
static FAT_ATTR_MAP: [(u64, u32); 3] = [
|
||||
( WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
|
||||
( WITH_FLAG_SYSTEM, ATTR_SYS ),
|
||||
( WITH_FLAG_ARCHIVE, ATTR_ARCH ),
|
||||
];
|
||||
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
|
||||
( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
|
||||
( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
|
||||
( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
|
||||
];
|
||||
|
||||
pub fn feature_flags_from_fat_attr(attr: u32) -> u64 {
|
||||
|
||||
let mut flags = 0u64;
|
||||
let mut flags = Flags::empty();
|
||||
|
||||
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
|
||||
if (attr & fs_flag) != 0 { flags |= fe_flag; }
|
||||
if (attr & fs_flag) != 0 {
|
||||
flags |= *fe_flag;
|
||||
}
|
||||
}
|
||||
|
||||
flags
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
|
||||
pub fn feature_flags_from_magic(magic: i64) -> u64 {
|
||||
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
|
||||
pub fn from_magic(magic: i64) -> Flags {
|
||||
use proxmox::sys::linux::magic::*;
|
||||
match magic {
|
||||
MSDOS_SUPER_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_FAT_ATTRS
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_FAT_ATTRS
|
||||
},
|
||||
EXT4_SUPER_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_DIRSYNC|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_XATTRS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX|
|
||||
WITH_FCAPS|
|
||||
WITH_QUOTA_PROJID
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS |
|
||||
Flags::WITH_FLAG_APPEND |
|
||||
Flags::WITH_FLAG_NOATIME |
|
||||
Flags::WITH_FLAG_NODUMP |
|
||||
Flags::WITH_FLAG_DIRSYNC |
|
||||
Flags::WITH_FLAG_IMMUTABLE |
|
||||
Flags::WITH_FLAG_SYNC |
|
||||
Flags::WITH_XATTRS |
|
||||
Flags::WITH_ACL |
|
||||
Flags::WITH_SELINUX |
|
||||
Flags::WITH_FCAPS |
|
||||
Flags::WITH_QUOTA_PROJID
|
||||
},
|
||||
XFS_SUPER_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_XATTRS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX|
|
||||
WITH_FCAPS|
|
||||
WITH_QUOTA_PROJID
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS |
|
||||
Flags::WITH_FLAG_APPEND |
|
||||
Flags::WITH_FLAG_NOATIME |
|
||||
Flags::WITH_FLAG_NODUMP |
|
||||
Flags::WITH_FLAG_IMMUTABLE |
|
||||
Flags::WITH_FLAG_SYNC |
|
||||
Flags::WITH_XATTRS |
|
||||
Flags::WITH_ACL |
|
||||
Flags::WITH_SELINUX |
|
||||
Flags::WITH_FCAPS |
|
||||
Flags::WITH_QUOTA_PROJID
|
||||
},
|
||||
ZFS_SUPER_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_DIRSYNC|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_XATTRS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX|
|
||||
WITH_FCAPS|
|
||||
WITH_QUOTA_PROJID
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS |
|
||||
Flags::WITH_FLAG_APPEND |
|
||||
Flags::WITH_FLAG_NOATIME |
|
||||
Flags::WITH_FLAG_NODUMP |
|
||||
Flags::WITH_FLAG_DIRSYNC |
|
||||
Flags::WITH_FLAG_IMMUTABLE |
|
||||
Flags::WITH_FLAG_SYNC |
|
||||
Flags::WITH_XATTRS |
|
||||
Flags::WITH_ACL |
|
||||
Flags::WITH_SELINUX |
|
||||
Flags::WITH_FCAPS |
|
||||
Flags::WITH_QUOTA_PROJID
|
||||
},
|
||||
BTRFS_SUPER_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_FLAG_APPEND|
|
||||
WITH_FLAG_NOATIME|
|
||||
WITH_FLAG_COMPR|
|
||||
WITH_FLAG_NOCOW|
|
||||
WITH_FLAG_NODUMP|
|
||||
WITH_FLAG_DIRSYNC|
|
||||
WITH_FLAG_IMMUTABLE|
|
||||
WITH_FLAG_SYNC|
|
||||
WITH_FLAG_NOCOMP|
|
||||
WITH_XATTRS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX|
|
||||
WITH_SUBVOLUME|
|
||||
WITH_SUBVOLUME_RO|
|
||||
WITH_FCAPS
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS |
|
||||
Flags::WITH_FLAG_APPEND |
|
||||
Flags::WITH_FLAG_NOATIME |
|
||||
Flags::WITH_FLAG_COMPR |
|
||||
Flags::WITH_FLAG_NOCOW |
|
||||
Flags::WITH_FLAG_NODUMP |
|
||||
Flags::WITH_FLAG_DIRSYNC |
|
||||
Flags::WITH_FLAG_IMMUTABLE |
|
||||
Flags::WITH_FLAG_SYNC |
|
||||
Flags::WITH_FLAG_NOCOMP |
|
||||
Flags::WITH_XATTRS |
|
||||
Flags::WITH_ACL |
|
||||
Flags::WITH_SELINUX |
|
||||
Flags::WITH_SUBVOLUME |
|
||||
Flags::WITH_SUBVOLUME_RO |
|
||||
Flags::WITH_FCAPS
|
||||
},
|
||||
TMPFS_MAGIC => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS|
|
||||
WITH_ACL|
|
||||
WITH_SELINUX
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS |
|
||||
Flags::WITH_ACL |
|
||||
Flags::WITH_SELINUX
|
||||
},
|
||||
// FUSE mounts are special as the supported feature set
|
||||
// is not clear a priori.
|
||||
FUSE_SUPER_MAGIC => {
|
||||
WITH_FUSE
|
||||
Flags::WITH_FUSE
|
||||
},
|
||||
_ => {
|
||||
WITH_2SEC_TIME|
|
||||
WITH_READ_ONLY|
|
||||
WITH_PERMISSIONS|
|
||||
WITH_SYMLINKS|
|
||||
WITH_DEVICE_NODES|
|
||||
WITH_FIFOS|
|
||||
WITH_SOCKETS
|
||||
Flags::WITH_2SEC_TIME |
|
||||
Flags::WITH_READ_ONLY |
|
||||
Flags::WITH_PERMISSIONS |
|
||||
Flags::WITH_SYMLINKS |
|
||||
Flags::WITH_DEVICE_NODES |
|
||||
Flags::WITH_FIFOS |
|
||||
Flags::WITH_SOCKETS
|
||||
},
|
||||
}
|
||||
}
|
||||
}
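A short sanity-check sketch of the new bitflags API (assuming the Flags type from this diff; the asserted relations follow directly from the constants above, with 0x20 being the FS_APPEND_FL chattr bit listed in from_chattr):

fn flags_sanity_check() {
    let flags = Flags::default(); // Flags::DEFAULT via the Default impl above
    assert!(flags.contains(Flags::WITH_XATTRS)); // part of DEFAULT
    assert!(!flags.contains(Flags::EXCLUDE_SUBMOUNTS)); // not part of DEFAULT

    // chattr bits map onto feature flags:
    let from_attr = Flags::from_chattr(0x0000_0020); // FS_APPEND_FL
    assert!(from_attr.contains(Flags::WITH_FLAG_APPEND));
}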
|
||||
|
@@ -1,263 +0,0 @@
|
||||
//! *pxar* binary format definition
|
||||
//!
|
||||
//! Please note that all values are stored in little-endian ordering.
|
||||
//!
|
||||
//! The Archive contains a list of items. Each item starts with a
|
||||
//! `PxarHeader`, followed by the item data.
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use endian_trait::Endian;
|
||||
use anyhow::{bail, Error};
|
||||
use siphasher::sip::SipHasher24;
|
||||
|
||||
|
||||
/// Header types identifying items stored in the archive
|
||||
pub const PXAR_ENTRY: u64 = 0x1396fabcea5bbb51;
|
||||
pub const PXAR_FILENAME: u64 = 0x6dbb6ebcb3161f0b;
|
||||
pub const PXAR_SYMLINK: u64 = 0x664a6fb6830e0d6c;
|
||||
pub const PXAR_DEVICE: u64 = 0xac3dace369dfe643;
|
||||
pub const PXAR_XATTR: u64 = 0xb8157091f80bc486;
|
||||
pub const PXAR_ACL_USER: u64 = 0x297dc88b2ef12faf;
|
||||
pub const PXAR_ACL_GROUP: u64 = 0x36f2acb56cb3dd0b;
|
||||
pub const PXAR_ACL_GROUP_OBJ: u64 = 0x23047110441f38f3;
|
||||
pub const PXAR_ACL_DEFAULT: u64 = 0xfe3eeda6823c8cd0;
|
||||
pub const PXAR_ACL_DEFAULT_USER: u64 = 0xbdf03df9bd010a91;
|
||||
pub const PXAR_ACL_DEFAULT_GROUP: u64 = 0xa0cb1168782d1f51;
|
||||
pub const PXAR_FCAPS: u64 = 0xf7267db0afed0629;
|
||||
pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
|
||||
|
||||
/// Marks item as hardlink
|
||||
/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
|
||||
pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
|
||||
/// Marks the beginning of the payload (actual content) of regular files
|
||||
pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
|
||||
/// Marks item as entry of goodbye table
|
||||
pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
|
||||
/// The end marker used in the GOODBYE object
|
||||
pub const PXAR_GOODBYE_TAIL_MARKER: u64 = 0x57446fa533702943;
|
||||
|
||||
#[derive(Debug, Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarHeader {
|
||||
/// The item type (see `PXAR_` constants).
|
||||
pub htype: u64,
|
||||
/// The size of the item, including the size of `PxarHeader`.
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
#[derive(Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarEntry {
|
||||
pub mode: u64,
|
||||
pub flags: u64,
|
||||
pub uid: u32,
|
||||
pub gid: u32,
|
||||
pub mtime: u64,
|
||||
}
|
||||
|
||||
#[derive(Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarDevice {
|
||||
pub major: u64,
|
||||
pub minor: u64,
|
||||
}
|
||||
|
||||
#[derive(Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarGoodbyeItem {
|
||||
/// SipHash24 of the directory item name. The last GOODBYE item
|
||||
/// uses the special hash value `PXAR_GOODBYE_TAIL_MARKER`.
|
||||
pub hash: u64,
|
||||
/// The offset from the start of the GOODBYE object to the start
|
||||
/// of the matching directory item (points to a FILENAME). The last
|
||||
/// GOODBYE item points to the start of the matching ENTRY
|
||||
/// object.
|
||||
pub offset: u64,
|
||||
/// The overall size of the directory item. The last GOODBYE item
|
||||
/// repeats the size of the GOODBYE item.
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Helper function to extract file names from binary archive.
|
||||
pub fn read_os_string(buffer: &[u8]) -> std::ffi::OsString {
|
||||
let len = buffer.len();
|
||||
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
let name = if len > 0 && buffer[len - 1] == 0 {
|
||||
std::ffi::OsStr::from_bytes(&buffer[0..len - 1])
|
||||
} else {
|
||||
std::ffi::OsStr::from_bytes(&buffer)
|
||||
};
|
||||
|
||||
name.into()
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq)]
|
||||
#[repr(C)]
|
||||
pub struct PxarXAttr {
|
||||
pub name: Vec<u8>,
|
||||
pub value: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Ord for PxarXAttr {
|
||||
fn cmp(&self, other: &PxarXAttr) -> Ordering {
|
||||
self.name.cmp(&other.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for PxarXAttr {
|
||||
fn partial_cmp(&self, other: &PxarXAttr) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for PxarXAttr {
|
||||
fn eq(&self, other: &PxarXAttr) -> bool {
|
||||
self.name == other.name
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[repr(C)]
|
||||
pub struct PxarFCaps {
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Endian, Eq)]
|
||||
#[repr(C)]
|
||||
pub struct PxarACLUser {
|
||||
pub uid: u64,
|
||||
pub permissions: u64,
|
||||
//pub name: Vec<u64>, not impl for now
|
||||
}
|
||||
|
||||
// TODO: once `name` is implemented, sort by uid, then by name and lastly by permissions
|
||||
impl Ord for PxarACLUser {
|
||||
fn cmp(&self, other: &PxarACLUser) -> Ordering {
|
||||
match self.uid.cmp(&other.uid) {
|
||||
// uids are equal, entries ordered by permissions
|
||||
Ordering::Equal => self.permissions.cmp(&other.permissions),
|
||||
// uids are different, entries ordered by uid
|
||||
uid_order => uid_order,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for PxarACLUser {
|
||||
fn partial_cmp(&self, other: &PxarACLUser) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for PxarACLUser {
|
||||
fn eq(&self, other: &PxarACLUser) -> bool {
|
||||
self.uid == other.uid && self.permissions == other.permissions
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Endian, Eq)]
|
||||
#[repr(C)]
|
||||
pub struct PxarACLGroup {
|
||||
pub gid: u64,
|
||||
pub permissions: u64,
|
||||
//pub name: Vec<u64>, not impl for now
|
||||
}
|
||||
|
||||
// TODO: once `name` is implemented, sort by gid, then by name and lastly by permissions
|
||||
impl Ord for PxarACLGroup {
|
||||
fn cmp(&self, other: &PxarACLGroup) -> Ordering {
|
||||
match self.gid.cmp(&other.gid) {
|
||||
// gids are equal, entries are ordered by permissions
|
||||
Ordering::Equal => self.permissions.cmp(&other.permissions),
|
||||
// gids are different, entries ordered by gid
|
||||
gid_ordering => gid_ordering,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for PxarACLGroup {
|
||||
fn partial_cmp(&self, other: &PxarACLGroup) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for PxarACLGroup {
|
||||
fn eq(&self, other: &PxarACLGroup) -> bool {
|
||||
self.gid == other.gid && self.permissions == other.permissions
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarACLGroupObj {
|
||||
pub permissions: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarACLDefault {
|
||||
pub user_obj_permissions: u64,
|
||||
pub group_obj_permissions: u64,
|
||||
pub other_permissions: u64,
|
||||
pub mask_permissions: u64,
|
||||
}
|
||||
|
||||
pub(crate) struct PxarACL {
|
||||
pub users: Vec<PxarACLUser>,
|
||||
pub groups: Vec<PxarACLGroup>,
|
||||
pub group_obj: Option<PxarACLGroupObj>,
|
||||
pub default: Option<PxarACLDefault>,
|
||||
}
|
||||
|
||||
pub const PXAR_ACL_PERMISSION_READ: u64 = 4;
|
||||
pub const PXAR_ACL_PERMISSION_WRITE: u64 = 2;
|
||||
pub const PXAR_ACL_PERMISSION_EXECUTE: u64 = 1;
|
||||
|
||||
#[derive(Debug, Endian)]
|
||||
#[repr(C)]
|
||||
pub struct PxarQuotaProjID {
|
||||
pub projid: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct PxarAttributes {
|
||||
pub xattrs: Vec<PxarXAttr>,
|
||||
pub fcaps: Option<PxarFCaps>,
|
||||
pub quota_projid: Option<PxarQuotaProjID>,
|
||||
pub acl_user: Vec<PxarACLUser>,
|
||||
pub acl_group: Vec<PxarACLGroup>,
|
||||
pub acl_group_obj: Option<PxarACLGroupObj>,
|
||||
pub acl_default: Option<PxarACLDefault>,
|
||||
pub acl_default_user: Vec<PxarACLUser>,
|
||||
pub acl_default_group: Vec<PxarACLGroup>,
|
||||
}
|
||||
|
||||
/// Create SipHash values for goodbye tables.
|
||||
//pub fn compute_goodbye_hash(name: &std::ffi::CStr) -> u64 {
|
||||
pub fn compute_goodbye_hash(name: &[u8]) -> u64 {
|
||||
use std::hash::Hasher;
|
||||
let mut hasher = SipHasher24::new_with_keys(0x8574442b0f1d84b3, 0x2736ed30d1c22ec1);
|
||||
hasher.write(name);
|
||||
hasher.finish()
|
||||
}
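The goodbye hash is plain SipHash-2-4 with the fixed keys above, so it can be reproduced with the siphasher crate alone; per the comment next to PXAR_FORMAT_HARDLINK, hashing the hardlink marker name yields exactly that constant. A small sketch mirroring compute_goodbye_hash (the helper names are illustrative):

use std::hash::Hasher;
use siphasher::sip::SipHasher24;

fn goodbye_hash(name: &[u8]) -> u64 {
    let mut hasher = SipHasher24::new_with_keys(0x8574442b0f1d84b3, 0x2736ed30d1c22ec1);
    hasher.write(name);
    hasher.finish()
}

fn hardlink_marker_check() {
    assert_eq!(goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__"), 0x2c5e06f634f65b86);
}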
|
||||
|
||||
pub fn check_ca_header<T>(head: &PxarHeader, htype: u64) -> Result<(), Error> {
|
||||
if head.htype != htype {
|
||||
bail!(
|
||||
"got wrong header type ({:016x} != {:016x})",
|
||||
head.htype,
|
||||
htype
|
||||
);
|
||||
}
|
||||
if head.size != (std::mem::size_of::<T>() + std::mem::size_of::<PxarHeader>()) as u64 {
|
||||
bail!("got wrong header size for type {:016x}", htype);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The format requires building sorted directory lookup tables in
|
||||
/// memory, so we restrict the number of allowed entries to limit
|
||||
/// maximum memory usage.
|
||||
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
|
src/pxar/fuse.rs (1438 lines changed): file diff suppressed because it is too large
@@ -1,36 +0,0 @@
|
||||
use libc;
|
||||
use nix::sys::stat::FileStat;
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_directory(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFDIR
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_symlink(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFLNK
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_reg_file(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFREG
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_block_dev(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFBLK
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_char_dev(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFCHR
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_fifo(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFIFO
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn is_socket(stat: &FileStat) -> bool {
|
||||
(stat.st_mode & libc::S_IFMT) == libc::S_IFSOCK
|
||||
}
|
@@ -1,514 +0,0 @@
|
||||
//! `MatchPattern` defines a match pattern used to match filenames encountered
|
||||
//! during encoding or decoding of a `pxar` archive.
|
||||
//! `fnmatch` is used internally to match filenames against the patterns.
|
||||
//! Shell wildcard patterns can be used to match multiple filenames, see the manpage
|
||||
//! `glob(7)`.
|
||||
//! `**` is treated special, as it matches multiple directories in a path.
|
||||
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::os::unix::io::{FromRawFd, RawFd};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use libc::{c_char, c_int};
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl;
|
||||
use nix::fcntl::{AtFlags, OFlag};
|
||||
use nix::sys::stat;
|
||||
use nix::sys::stat::{FileStat, Mode};
|
||||
use nix::NixPath;
|
||||
|
||||
pub const FNM_NOMATCH: c_int = 1;
|
||||
|
||||
extern "C" {
|
||||
fn fnmatch(pattern: *const c_char, string: *const c_char, flags: c_int) -> c_int;
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
pub enum MatchType {
|
||||
None,
|
||||
Positive,
|
||||
Negative,
|
||||
PartialPositive,
|
||||
PartialNegative,
|
||||
}
|
||||
|
||||
/// `MatchPattern` provides functionality for filename glob pattern matching
|
||||
/// based on glibc's `fnmatch`.
|
||||
/// Positive matches return `MatchType::PartialPositive` or `MatchType::Positive`.
|
||||
/// Patterns starting with `!` are interpreted as negation, meaning they will
|
||||
/// return `MatchType::PartialNegative` or `MatchType::Negative`.
|
||||
/// No matches result in `MatchType::None`.
|
||||
/// # Examples:
|
||||
/// ```
|
||||
/// # use std::ffi::CString;
|
||||
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchType};
|
||||
/// # fn main() -> Result<(), anyhow::Error> {
|
||||
/// let filename = CString::new("some.conf")?;
|
||||
/// let is_dir = false;
|
||||
///
|
||||
/// /// Positive match of any file ending in `.conf` in any subdirectory
|
||||
/// let positive = MatchPattern::from_line(b"**/*.conf")?.unwrap();
|
||||
/// let m_positive = positive.as_slice().matches_filename(&filename, is_dir)?;
|
||||
/// assert!(m_positive == MatchType::Positive);
|
||||
///
|
||||
/// /// Negative match of filenames starting with `s`
|
||||
/// let negative = MatchPattern::from_line(b"![s]*")?.unwrap();
|
||||
/// let m_negative = negative.as_slice().matches_filename(&filename, is_dir)?;
|
||||
/// assert!(m_negative == MatchType::Negative);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
#[derive(Clone, Eq, PartialOrd)]
|
||||
pub struct MatchPattern {
|
||||
pattern: Vec<u8>,
|
||||
match_positive: bool,
|
||||
match_dir_only: bool,
|
||||
}
|
||||
|
||||
impl std::cmp::PartialEq for MatchPattern {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.pattern == other.pattern
|
||||
&& self.match_positive == other.match_positive
|
||||
&& self.match_dir_only == other.match_dir_only
|
||||
}
|
||||
}
|
||||
|
||||
impl std::cmp::Ord for MatchPattern {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
(&self.pattern, &self.match_positive, &self.match_dir_only)
|
||||
.cmp(&(&other.pattern, &other.match_positive, &other.match_dir_only))
|
||||
}
|
||||
}
|
||||
|
||||
impl MatchPattern {
|
||||
/// Read a list of `MatchPattern` from file.
|
||||
/// The file is read line by line (lines terminated by newline character),
|
||||
/// each line may only contain one pattern.
|
||||
/// Leading `/` are ignored and lines starting with `#` are interpreted as
|
||||
/// comments and not included in the resulting list.
|
||||
/// Patterns ending in `/` will match only directories.
|
||||
///
|
||||
/// On success, a list of match pattern is returned as well as the raw file
|
||||
/// byte buffer together with the files stats.
|
||||
/// This is done in order to avoid reading the file more than once during
|
||||
/// encoding of the archive.
|
||||
pub fn from_file<P: ?Sized + NixPath>(
|
||||
parent_fd: RawFd,
|
||||
filename: &P,
|
||||
) -> Result<Option<(Vec<MatchPattern>, Vec<u8>, FileStat)>, nix::Error> {
|
||||
let stat = match stat::fstatat(parent_fd, filename, AtFlags::AT_SYMLINK_NOFOLLOW) {
|
||||
Ok(stat) => stat,
|
||||
Err(nix::Error::Sys(Errno::ENOENT)) => return Ok(None),
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
|
||||
let filefd = fcntl::openat(parent_fd, filename, OFlag::O_NOFOLLOW, Mode::empty())?;
|
||||
let mut file = unsafe { File::from_raw_fd(filefd) };
|
||||
|
||||
let mut content_buffer = Vec::new();
|
||||
let _bytes = file.read_to_end(&mut content_buffer)
|
||||
.map_err(|_| Errno::EIO)?;
|
||||
|
||||
let mut match_pattern = Vec::new();
|
||||
for line in content_buffer.split(|&c| c == b'\n') {
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if let Some(pattern) = Self::from_line(line)? {
|
||||
match_pattern.push(pattern);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some((match_pattern, content_buffer, stat)))
|
||||
}
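    // Illustrative sketch (not part of this patch): loading an exclude file
    // relative to an already-open directory fd. The fd and file name below are
    // assumptions for the example.
    //
    //     let dir_fd: RawFd = ...; // fd of the directory being archived
    //     if let Some((patterns, raw_bytes, stat)) = MatchPattern::from_file(dir_fd, ".pxarexclude")? {
    //         // `raw_bytes` and `stat` can be reused while encoding the archive,
    //         // so the file does not have to be read a second time.
    //         println!("loaded {} patterns", patterns.len());
    //     }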
|
||||
|
||||
/// Interpret a byte buffer as a single line containing a valid
|
||||
/// `MatchPattern`.
|
||||
/// Patterns starting with `#` are interpreted as comments, returning `Ok(None)`.
/// Patterns starting with `!` are interpreted as negative match patterns.
/// Patterns with a trailing `/` match only against directories.
|
||||
/// `.` as well as `..` and any pattern containing `\0` are invalid and will
|
||||
/// result in an error with Errno::EINVAL.
|
||||
pub fn from_line(line: &[u8]) -> Result<Option<MatchPattern>, nix::Error> {
|
||||
let mut input = line;
|
||||
|
||||
if input.starts_with(b"#") {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let match_positive = if input.starts_with(b"!") {
|
||||
// Reduce slice view to exclude "!"
|
||||
input = &input[1..];
|
||||
false
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
// Paths ending in / match only directory names (no filenames)
|
||||
let match_dir_only = if input.ends_with(b"/") {
|
||||
let len = input.len();
|
||||
input = &input[..len - 1];
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// Ignore initial slash
|
||||
if input.starts_with(b"/") {
|
||||
input = &input[1..];
|
||||
}
|
||||
|
||||
if input.is_empty() || input == b"." || input == b".." || input.contains(&b'\0') {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL));
|
||||
}
|
||||
|
||||
Ok(Some(MatchPattern {
|
||||
pattern: input.to_vec(),
|
||||
match_positive,
|
||||
match_dir_only,
|
||||
}))
|
||||
}
|
||||
|
||||
|
||||
/// Create a `MatchPatternSlice` of the `MatchPattern` to give a view of the
|
||||
/// `MatchPattern` without copying its content.
|
||||
pub fn as_slice<'a>(&'a self) -> MatchPatternSlice<'a> {
|
||||
MatchPatternSlice {
|
||||
pattern: self.pattern.as_slice(),
|
||||
match_positive: self.match_positive,
|
||||
match_dir_only: self.match_dir_only,
|
||||
}
|
||||
}
|
||||
|
||||
/// Dump the content of the `MatchPattern` to stdout.
|
||||
/// Intended for debugging purposes only.
|
||||
pub fn dump(&self) {
|
||||
match (self.match_positive, self.match_dir_only) {
|
||||
(true, true) => println!("{:#?}/", self.pattern),
|
||||
(true, false) => println!("{:#?}", self.pattern),
|
||||
(false, true) => println!("!{:#?}/", self.pattern),
|
||||
(false, false) => println!("!{:#?}", self.pattern),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a list of MatchPattern to bytes in order to write them to e.g.
|
||||
/// a file.
|
||||
pub fn to_bytes(patterns: &[MatchPattern]) -> Vec<u8> {
|
||||
let mut slices = Vec::new();
|
||||
for pattern in patterns {
|
||||
slices.push(pattern.as_slice());
|
||||
}
|
||||
|
||||
MatchPatternSlice::to_bytes(&slices)
|
||||
}
|
||||
|
||||
/// Invert the match type for this MatchPattern.
|
||||
pub fn invert(&mut self) {
|
||||
self.match_positive = !self.match_positive;
|
||||
}
|
||||
}
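// Illustrative sketch, not part of this patch: `to_bytes` re-serializes patterns
// to the line format parsed by `from_line`, and `invert` flips the match type.
#[cfg(test)]
#[test]
fn match_pattern_roundtrip_example() -> Result<(), nix::Error> {
    let mut patterns = vec![
        MatchPattern::from_line(b"**/*.conf")?.unwrap(),
        MatchPattern::from_line(b"!tmp/")?.unwrap(),
    ];
    // turn the negative `!tmp/` pattern into a positive one
    patterns[1].invert();
    // one pattern per line, with `!` and the trailing `/` re-added as needed
    let bytes = MatchPattern::to_bytes(&patterns);
    assert_eq!(bytes, b"**/*.conf\ntmp/\n".to_vec());
    Ok(())
}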
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MatchPatternSlice<'a> {
|
||||
pattern: &'a [u8],
|
||||
match_positive: bool,
|
||||
match_dir_only: bool,
|
||||
}
|
||||
|
||||
impl<'a> MatchPatternSlice<'a> {
|
||||
/// Returns the pattern before the first `/` encountered as `MatchPatternSlice`.
|
||||
/// If no slash is encountered, the `MatchPatternSlice` will be a copy of the
|
||||
/// original pattern.
|
||||
/// ```
|
||||
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
/// # fn main() -> Result<(), anyhow::Error> {
|
||||
/// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
|
||||
/// let slice = pattern.as_slice();
|
||||
/// let front = slice.get_front_pattern();
|
||||
/// // ... will be the same as ...
|
||||
/// let front_pattern = MatchPattern::from_line(b"some")?.unwrap();
|
||||
/// let front_slice = front_pattern.as_slice();
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn get_front_pattern(&'a self) -> MatchPatternSlice<'a> {
|
||||
let (front, _) = self.split_at_slash();
|
||||
MatchPatternSlice {
|
||||
pattern: front,
|
||||
match_positive: self.match_positive,
|
||||
match_dir_only: self.match_dir_only,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the pattern after the first encountered `/` as `MatchPatternSlice`.
|
||||
/// If no slash is encountered, the `MatchPatternSlice` will be empty.
|
||||
/// ```
|
||||
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
/// # fn main() -> Result<(), anyhow::Error> {
|
||||
/// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
|
||||
/// let slice = pattern.as_slice();
|
||||
/// let rest = slice.get_rest_pattern();
|
||||
/// // ... will be the same as ...
|
||||
/// let rest_pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
|
||||
/// let rest_slice = rest_pattern.as_slice();
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn get_rest_pattern(&'a self) -> MatchPatternSlice<'a> {
|
||||
let (_, rest) = self.split_at_slash();
|
||||
MatchPatternSlice {
|
||||
pattern: rest,
|
||||
match_positive: self.match_positive,
|
||||
match_dir_only: self.match_dir_only,
|
||||
}
|
||||
}
|
||||
|
||||
/// Splits the `MatchPatternSlice` at the first slash encountered and returns the
|
||||
/// content before (front pattern) and after the slash (rest pattern),
|
||||
/// omitting the slash itself.
|
||||
/// Slices starting with `**/` are an exception to this, as the corresponding
|
||||
/// `MatchPattern` is intended to match multiple directories.
|
||||
/// These pattern slices therefore return a `*` as front pattern and the original
|
||||
/// pattern itself as rest pattern.
|
||||
fn split_at_slash(&'a self) -> (&'a [u8], &'a [u8]) {
|
||||
let pattern = if self.pattern.starts_with(b"./") {
|
||||
&self.pattern[2..]
|
||||
} else {
|
||||
self.pattern
|
||||
};
|
||||
|
||||
let (mut front, mut rest) = match pattern.iter().position(|&c| c == b'/') {
|
||||
Some(ind) => {
|
||||
let (front, rest) = pattern.split_at(ind);
|
||||
(front, &rest[1..])
|
||||
}
|
||||
None => (pattern, &pattern[0..0]),
|
||||
};
|
||||
// '**' is treated such that it matches any directory
|
||||
if front == b"**" {
|
||||
front = b"*";
|
||||
rest = pattern;
|
||||
}
|
||||
|
||||
(front, rest)
|
||||
}
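    // Illustrative examples (not part of this patch) of what `split_at_slash`
    // returns; a leading `./` is stripped before splitting:
    //
    //   "some/match/pattern" -> front: "some",   rest: "match/pattern"
    //   "./etc/passwd"       -> front: "etc",    rest: "passwd"
    //   "*.conf"             -> front: "*.conf", rest: ""
    //   "**/*.conf"          -> front: "*",      rest: "**/*.conf" (matches any directory depth)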
|
||||
|
||||
/// Convert a list of `MatchPatternSlice`s to bytes in order to write them to e.g.
|
||||
/// a file.
|
||||
pub fn to_bytes(patterns: &[MatchPatternSlice]) -> Vec<u8> {
|
||||
let mut buffer = Vec::new();
|
||||
for pattern in patterns {
|
||||
if !pattern.match_positive { buffer.push(b'!'); }
|
||||
buffer.extend_from_slice(&pattern.pattern);
|
||||
if pattern.match_dir_only { buffer.push(b'/'); }
|
||||
buffer.push(b'\n');
|
||||
}
|
||||
buffer
|
||||
}
|
||||
|
||||
/// Match the given filename against this `MatchPatternSlice`.
|
||||
/// If the filename matches the pattern completely, `MatchType::Positive` or
|
||||
/// `MatchType::Negative` is returned, depending on whether the match pattern
/// was declared as positive (no `!` prefix) or negative (`!` prefix).
|
||||
/// If the pattern matched only up to the first slash of the pattern,
|
||||
/// `MatchType::PartialPositive` or `MatchType::PartialNegative` is returned.
|
||||
/// If the pattern was postfixed by a trailing `/`, a match is only valid if
|
||||
/// the parameter `is_dir` equals `true`.
|
||||
/// No match results in `MatchType::None`.
|
||||
pub fn matches_filename(&self, filename: &CStr, is_dir: bool) -> Result<MatchType, Error> {
|
||||
let mut res = MatchType::None;
|
||||
let (front, _) = self.split_at_slash();
|
||||
|
||||
let front = CString::new(front).unwrap();
|
||||
let fnmatch_res = unsafe {
|
||||
let front_ptr = front.as_ptr() as *const libc::c_char;
|
||||
let filename_ptr = filename.as_ptr() as *const libc::c_char;
|
||||
fnmatch(front_ptr, filename_ptr, 0)
|
||||
};
|
||||
if fnmatch_res < 0 {
|
||||
bail!("error in fnmatch inside of MatchPattern");
|
||||
}
|
||||
if fnmatch_res == 0 {
|
||||
res = if self.match_positive {
|
||||
MatchType::PartialPositive
|
||||
} else {
|
||||
MatchType::PartialNegative
|
||||
};
|
||||
}
|
||||
|
||||
let full = if self.pattern.starts_with(b"**/") {
|
||||
CString::new(&self.pattern[3..]).unwrap()
|
||||
} else {
|
||||
CString::new(&self.pattern[..]).unwrap()
|
||||
};
|
||||
let fnmatch_res = unsafe {
|
||||
let full_ptr = full.as_ptr() as *const libc::c_char;
|
||||
let filename_ptr = filename.as_ptr() as *const libc::c_char;
|
||||
fnmatch(full_ptr, filename_ptr, 0)
|
||||
};
|
||||
if fnmatch_res < 0 {
|
||||
bail!("error in fnmatch inside of MatchPattern");
|
||||
}
|
||||
if fnmatch_res == 0 {
|
||||
res = if self.match_positive {
|
||||
MatchType::Positive
|
||||
} else {
|
||||
MatchType::Negative
|
||||
};
|
||||
}
|
||||
|
||||
if !is_dir && self.match_dir_only {
|
||||
res = MatchType::None;
|
||||
}
|
||||
|
||||
if !is_dir && (res == MatchType::PartialPositive || res == MatchType::PartialNegative) {
|
||||
res = MatchType::None;
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Match the given filename against the set of `MatchPatternSlice`s.
|
||||
///
|
||||
/// A positive match is intended to include the full subtree (unless another
|
||||
/// negative match excludes entries later).
|
||||
/// The `MatchType` together with an updated `MatchPatternSlice` list for passing
|
||||
/// to the matched child is returned.
|
||||
/// ```
|
||||
/// # use std::ffi::CString;
|
||||
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
/// # fn main() -> Result<(), anyhow::Error> {
|
||||
/// let patterns = vec![
|
||||
/// MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
|
||||
/// MatchPattern::from_line(b"to_match/")?.unwrap()
|
||||
/// ];
|
||||
/// let mut slices = Vec::new();
|
||||
/// for pattern in &patterns {
|
||||
/// slices.push(pattern.as_slice());
|
||||
/// }
|
||||
/// let filename = CString::new("some")?;
|
||||
/// let is_dir = true;
|
||||
/// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
|
||||
/// &filename,
|
||||
/// is_dir,
|
||||
/// &slices
|
||||
/// )?;
|
||||
/// assert_eq!(match_type, MatchType::PartialPositive);
|
||||
/// // child pattern will be the same as ...
|
||||
/// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
|
||||
/// let slice = pattern.as_slice();
|
||||
///
|
||||
/// let filename = CString::new("to_match")?;
|
||||
/// let is_dir = true;
|
||||
/// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
|
||||
/// &filename,
|
||||
/// is_dir,
|
||||
/// &slices
|
||||
/// )?;
|
||||
/// assert_eq!(match_type, MatchType::Positive);
|
||||
/// // child pattern will be the same as ...
|
||||
/// let pattern = MatchPattern::from_line(b"**/*")?.unwrap();
|
||||
/// let slice = pattern.as_slice();
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn match_filename_include(
|
||||
filename: &CStr,
|
||||
is_dir: bool,
|
||||
match_pattern: &'a [MatchPatternSlice<'a>],
|
||||
) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
|
||||
let mut child_pattern = Vec::new();
|
||||
let mut match_state = MatchType::None;
|
||||
|
||||
for pattern in match_pattern {
|
||||
match pattern.matches_filename(filename, is_dir)? {
|
||||
MatchType::None => continue,
|
||||
MatchType::Positive => match_state = MatchType::Positive,
|
||||
MatchType::Negative => match_state = MatchType::Negative,
|
||||
MatchType::PartialPositive => {
|
||||
if match_state != MatchType::Negative && match_state != MatchType::Positive {
|
||||
match_state = MatchType::PartialPositive;
|
||||
}
|
||||
child_pattern.push(pattern.get_rest_pattern());
|
||||
}
|
||||
MatchType::PartialNegative => {
|
||||
if match_state == MatchType::PartialPositive {
|
||||
match_state = MatchType::PartialNegative;
|
||||
}
|
||||
child_pattern.push(pattern.get_rest_pattern());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((match_state, child_pattern))
|
||||
}
|
||||
|
||||
/// Match the given filename against the set of `MatchPatternSlice`s.
|
||||
///
|
||||
/// A positive match is intended to exclude the full subtree, independent of
|
||||
/// matches deeper down the tree.
|
||||
/// The `MatchType` together with an updated `MatchPattern` list for passing
|
||||
/// to the matched child is returned.
|
||||
/// ```
|
||||
/// # use std::ffi::CString;
|
||||
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
/// # fn main() -> Result<(), anyhow::Error> {
|
||||
/// let patterns = vec![
|
||||
/// MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
|
||||
/// MatchPattern::from_line(b"to_match/")?.unwrap()
|
||||
/// ];
|
||||
/// let mut slices = Vec::new();
|
||||
/// for pattern in &patterns {
|
||||
/// slices.push(pattern.as_slice());
|
||||
/// }
|
||||
/// let filename = CString::new("some")?;
|
||||
/// let is_dir = true;
|
||||
/// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
|
||||
/// &filename,
|
||||
/// is_dir,
|
||||
/// &slices,
|
||||
/// )?;
|
||||
/// assert_eq!(match_type, MatchType::PartialPositive);
|
||||
/// // child pattern will be the same as ...
|
||||
/// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
|
||||
/// let slice = pattern.as_slice();
|
||||
///
|
||||
/// let filename = CString::new("to_match")?;
|
||||
/// let is_dir = true;
|
||||
/// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
|
||||
/// &filename,
|
||||
/// is_dir,
|
||||
/// &slices,
|
||||
/// )?;
|
||||
/// assert_eq!(match_type, MatchType::Positive);
|
||||
/// // child pattern will be empty
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn match_filename_exclude(
|
||||
filename: &CStr,
|
||||
is_dir: bool,
|
||||
match_pattern: &'a [MatchPatternSlice<'a>],
|
||||
) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
|
||||
let mut child_pattern = Vec::new();
|
||||
let mut match_state = MatchType::None;
|
||||
|
||||
for pattern in match_pattern {
|
||||
match pattern.matches_filename(filename, is_dir)? {
|
||||
MatchType::None => {}
|
||||
MatchType::Positive => match_state = MatchType::Positive,
|
||||
MatchType::Negative => match_state = MatchType::Negative,
|
||||
match_type => {
|
||||
if match_state != MatchType::Positive && match_state != MatchType::Negative {
|
||||
match_state = match_type;
|
||||
}
|
||||
child_pattern.push(pattern.get_rest_pattern());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((match_state, child_pattern))
|
||||
}
|
||||
}
|
319
src/pxar/metadata.rs
Normal file
@ -0,0 +1,319 @@
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pxar::Metadata;
|
||||
|
||||
use proxmox::c_result;
|
||||
use proxmox::sys::error::SysError;
|
||||
use proxmox::tools::fd::RawFdNum;
|
||||
|
||||
use crate::pxar::tools::perms_from_metadata;
|
||||
use crate::pxar::Flags;
|
||||
use crate::tools::{acl, fs, xattr};
|
||||
|
||||
//
|
||||
// utility functions
|
||||
//
|
||||
|
||||
fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
|
||||
if err.is_errno(Errno::EOPNOTSUPP) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
|
||||
fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
|
||||
if err.is_errno(Errno::EOPNOTSUPP) {
|
||||
*not_supp = true;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
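// Illustrative sketch, not part of this patch: both helpers are meant to be
// chained onto syscall results below, turning EOPNOTSUPP into a silent no-op:
//
//     c_result!(unsafe { libc::chown(path, uid, gid) })
//         .map(drop)
//         .or_else(allow_notsupp)?;                                        // ignore "not supported"
//
//     c_result!(unsafe { libc::setxattr(...) })
//         .map(drop)
//         .or_else(|err| allow_notsupp_remember(err, &mut skip_xattrs))?;  // ... and remember it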
|
||||
|
||||
fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
|
||||
// restore mtime
|
||||
const UTIME_OMIT: i64 = (1 << 30) - 2;
|
||||
const NANOS_PER_SEC: i64 = 1_000_000_000;
|
||||
|
||||
let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
|
||||
let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;
|
||||
|
||||
let times: [libc::timespec; 2] = [
|
||||
libc::timespec {
|
||||
tv_sec: 0,
|
||||
tv_nsec: UTIME_OMIT,
|
||||
},
|
||||
libc::timespec {
|
||||
tv_sec: sec,
|
||||
tv_nsec: nsec,
|
||||
},
|
||||
];
|
||||
|
||||
times
|
||||
}
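// Illustrative sketch, not part of this patch: the first timespec entry leaves
// atime untouched (UTIME_OMIT), the second carries the restored mtime.
#[cfg(test)]
#[test]
fn nsec_to_update_timespec_example() {
    // 1_500_000_000 ns since the epoch -> 1 s and 500_000_000 ns
    let times = nsec_to_update_timespec(1_500_000_000);
    assert_eq!(times[0].tv_sec, 0); // atime entry; tv_nsec holds the UTIME_OMIT marker
    assert_eq!(times[1].tv_sec, 1);
    assert_eq!(times[1].tv_nsec, 500_000_000);
}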
|
||||
|
||||
//
|
||||
// metadata application:
|
||||
//
|
||||
|
||||
pub fn apply_at(
|
||||
flags: Flags,
|
||||
metadata: &Metadata,
|
||||
parent: RawFd,
|
||||
file_name: &CStr,
|
||||
) -> Result<(), Error> {
|
||||
let fd = proxmox::tools::fd::Fd::openat(
|
||||
&unsafe { RawFdNum::from_raw_fd(parent) },
|
||||
file_name,
|
||||
OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
|
||||
Mode::empty(),
|
||||
)?;
|
||||
|
||||
apply(flags, metadata, fd.as_raw_fd(), file_name)
|
||||
}
|
||||
|
||||
pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
|
||||
let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();
|
||||
|
||||
if metadata.stat.flags != 0 {
|
||||
todo!("apply flags!");
|
||||
}
|
||||
|
||||
unsafe {
|
||||
// UID and GID first, as this fails if we lose access anyway.
|
||||
c_result!(libc::chown(
|
||||
c_proc_path.as_ptr(),
|
||||
metadata.stat.uid,
|
||||
metadata.stat.gid
|
||||
))
|
||||
.map(drop)
|
||||
.or_else(allow_notsupp)?;
|
||||
}
|
||||
|
||||
let mut skip_xattrs = false;
|
||||
apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
|
||||
add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
|
||||
apply_acls(flags, &c_proc_path, metadata)?;
|
||||
apply_quota_project_id(flags, fd, metadata)?;
|
||||
|
||||
// Finally mode and time. We may lose access with mode, but changing the mode
// also affects the times.
|
||||
if !metadata.is_symlink() {
|
||||
c_result!(unsafe {
|
||||
libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
|
||||
})
|
||||
.map(drop)
|
||||
.or_else(allow_notsupp)?;
|
||||
}
|
||||
|
||||
let res = c_result!(unsafe {
|
||||
libc::utimensat(
|
||||
libc::AT_FDCWD,
|
||||
c_proc_path.as_ptr(),
|
||||
nsec_to_update_timespec(metadata.stat.mtime).as_ptr(),
|
||||
0,
|
||||
)
|
||||
});
|
||||
match res {
|
||||
Ok(_) => (),
|
||||
Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
|
||||
Err(ref err) if err.is_errno(Errno::EPERM) => {
|
||||
println!(
|
||||
"failed to restore mtime attribute on {:?}: {}",
|
||||
file_name, err
|
||||
);
|
||||
}
|
||||
Err(err) => return Err(err.into()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_fcaps(
|
||||
flags: Flags,
|
||||
c_proc_path: *const libc::c_char,
|
||||
metadata: &Metadata,
|
||||
skip_xattrs: &mut bool,
|
||||
) -> Result<(), Error> {
|
||||
if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
|
||||
return Ok(());
|
||||
}
|
||||
let fcaps = match metadata.fcaps.as_ref() {
|
||||
Some(fcaps) => fcaps,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
c_result!(unsafe {
|
||||
libc::setxattr(
|
||||
c_proc_path,
|
||||
xattr::xattr_name_fcaps().as_ptr(),
|
||||
fcaps.data.as_ptr() as *const libc::c_void,
|
||||
fcaps.data.len(),
|
||||
0,
|
||||
)
|
||||
})
|
||||
.map(drop)
|
||||
.or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_xattrs(
|
||||
flags: Flags,
|
||||
c_proc_path: *const libc::c_char,
|
||||
metadata: &Metadata,
|
||||
skip_xattrs: &mut bool,
|
||||
) -> Result<(), Error> {
|
||||
if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
for xattr in &metadata.xattrs {
|
||||
if *skip_xattrs {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !xattr::is_valid_xattr_name(xattr.name()) {
|
||||
println!("skipping invalid xattr named {:?}", xattr.name());
|
||||
continue;
|
||||
}
|
||||
|
||||
c_result!(unsafe {
|
||||
libc::setxattr(
|
||||
c_proc_path,
|
||||
xattr.name().as_ptr() as *const libc::c_char,
|
||||
xattr.value().as_ptr() as *const libc::c_void,
|
||||
xattr.value().len(),
|
||||
0,
|
||||
)
|
||||
})
|
||||
.map(drop)
|
||||
.or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_acls(flags: Flags, c_proc_path: &CStr, metadata: &Metadata) -> Result<(), Error> {
|
||||
if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut acl = acl::ACL::init(5)?;
|
||||
|
||||
// acl type access:
|
||||
acl.add_entry_full(
|
||||
acl::ACL_USER_OBJ,
|
||||
None,
|
||||
acl::mode_user_to_acl_permissions(metadata.stat.mode),
|
||||
)?;
|
||||
|
||||
acl.add_entry_full(
|
||||
acl::ACL_OTHER,
|
||||
None,
|
||||
acl::mode_other_to_acl_permissions(metadata.stat.mode),
|
||||
)?;
|
||||
|
||||
match metadata.acl.group_obj.as_ref() {
|
||||
Some(group_obj) => {
|
||||
acl.add_entry_full(
|
||||
acl::ACL_MASK,
|
||||
None,
|
||||
acl::mode_group_to_acl_permissions(metadata.stat.mode),
|
||||
)?;
|
||||
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
|
||||
}
|
||||
None => {
|
||||
acl.add_entry_full(
|
||||
acl::ACL_GROUP_OBJ,
|
||||
None,
|
||||
acl::mode_group_to_acl_permissions(metadata.stat.mode),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
for user in &metadata.acl.users {
|
||||
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
|
||||
}
|
||||
|
||||
for group in &metadata.acl.groups {
|
||||
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
|
||||
}
|
||||
|
||||
if !acl.is_valid() {
|
||||
bail!("Error while restoring ACL - ACL invalid");
|
||||
}
|
||||
|
||||
acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
|
||||
drop(acl);
|
||||
|
||||
// acl type default:
|
||||
if let Some(default) = metadata.acl.default.as_ref() {
|
||||
let mut acl = acl::ACL::init(5)?;
|
||||
|
||||
acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;
|
||||
|
||||
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;
|
||||
|
||||
acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;
|
||||
|
||||
if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
|
||||
acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
|
||||
}
|
||||
|
||||
for user in &metadata.acl.default_users {
|
||||
acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
|
||||
}
|
||||
|
||||
for group in &metadata.acl.default_groups {
|
||||
acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
|
||||
}
|
||||
|
||||
if !acl.is_valid() {
|
||||
bail!("Error while restoring ACL - ACL invalid");
|
||||
}
|
||||
|
||||
acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
|
||||
if !flags.contains(Flags::WITH_QUOTA_PROJID) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let projid = match metadata.quota_project_id {
|
||||
Some(projid) => projid,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
let mut fsxattr = fs::FSXAttr::default();
|
||||
unsafe {
|
||||
fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
|
||||
format_err!(
|
||||
"error while getting fsxattr to restore quota project id - {}",
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
fsxattr.fsx_projid = projid.projid as u32;
|
||||
|
||||
fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
|
||||
format_err!(
|
||||
"error while setting fsxattr to restore quota project id - {}",
|
||||
err
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
@ -47,33 +47,23 @@
|
||||
//! (user, group, acl, ...) because this is already defined by the
|
||||
//! linked `ENTRY`.
|
||||
|
||||
mod binary_search_tree;
|
||||
pub use binary_search_tree::*;
|
||||
|
||||
pub mod flags;
|
||||
pub use flags::*;
|
||||
|
||||
mod format_definition;
|
||||
pub use format_definition::*;
|
||||
|
||||
mod encoder;
|
||||
pub use encoder::*;
|
||||
|
||||
mod sequential_decoder;
|
||||
pub use sequential_decoder::*;
|
||||
|
||||
mod decoder;
|
||||
pub use decoder::*;
|
||||
|
||||
mod match_pattern;
|
||||
pub use match_pattern::*;
|
||||
|
||||
mod dir_stack;
|
||||
pub use dir_stack::*;
|
||||
|
||||
pub mod fuse;
|
||||
pub use fuse::*;
|
||||
|
||||
pub mod catalog;
|
||||
pub(crate) mod create;
|
||||
pub(crate) mod dir_stack;
|
||||
pub(crate) mod extract;
|
||||
pub(crate) mod metadata;
|
||||
pub mod fuse;
|
||||
pub(crate) mod tools;
|
||||
|
||||
mod helper;
|
||||
mod flags;
|
||||
pub use flags::Flags;
|
||||
|
||||
pub use create::create_archive;
|
||||
pub use extract::extract_archive;
|
||||
|
||||
/// The format requires building sorted directory lookup tables in
|
||||
/// memory, so we restrict the number of allowed entries to limit
|
||||
/// maximum memory usage.
|
||||
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
|
||||
|
||||
pub use tools::{format_multi_line_entry, format_single_line_entry};
|
File diff suppressed because it is too large
203
src/pxar/tools.rs
Normal file
@ -0,0 +1,203 @@
|
||||
//! Some common methods used within the pxar code.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
use pxar::{mode, Entry, EntryKind, Metadata};
|
||||
|
||||
/// Get the file permissions as `nix::Mode`
|
||||
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
|
||||
let mode = meta.stat.get_permission_bits();
|
||||
u32::try_from(mode)
|
||||
.map_err(drop)
|
||||
.and_then(|mode| Mode::from_bits(mode).ok_or(()))
|
||||
.map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
|
||||
}
|
||||
|
||||
/// Make sure path is relative and not '.' or '..'.
|
||||
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
|
||||
assert_relative_path_do(Path::new(path))
|
||||
}
|
||||
|
||||
/// Make sure path is a single component and not '.' or '..'.
|
||||
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
|
||||
assert_single_path_component_do(Path::new(path))
|
||||
}
|
||||
|
||||
fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
|
||||
if !path.is_relative() {
|
||||
bail!("bad absolute file name in archive: {:?}", path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
|
||||
assert_relative_path_do(path)?;
|
||||
|
||||
let mut components = path.components();
|
||||
match components.next() {
|
||||
Some(std::path::Component::Normal(_)) => (),
|
||||
_ => bail!("invalid path component in archive: {:?}", path),
|
||||
}
|
||||
|
||||
if components.next().is_some() {
|
||||
bail!(
|
||||
"invalid path with multiple components in archive: {:?}",
|
||||
path
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
|
||||
[
|
||||
if 0 != c & 4 { b'r' } else { b'-' },
|
||||
if 0 != c & 2 { b'w' } else { b'-' },
|
||||
match (c & 1, special) {
|
||||
(0, false) => b'-',
|
||||
(0, true) => special_no_x,
|
||||
(_, false) => b'x',
|
||||
(_, true) => special_x,
|
||||
}
|
||||
]
|
||||
}
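// Illustrative sketch, not part of this patch: sample permission triplets.
// With a setuid/setgid/sticky bit set, the `x` position becomes `s`/`t`
// (executable) or `S`/`T` (not executable).
#[cfg(test)]
#[test]
fn symbolic_mode_examples() {
    assert_eq!(&symbolic_mode(7, false, b's', b'S'), b"rwx");
    assert_eq!(&symbolic_mode(6, true, b's', b'S'), b"rwS");
    assert_eq!(&symbolic_mode(5, true, b't', b'T'), b"r-t");
}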
|
||||
|
||||
fn mode_string(entry: &Entry) -> String {
|
||||
// https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
|
||||
// additionally we use:
|
||||
// file type capital 'L' hard links
|
||||
// a second '+' after the mode to show non-acl xattr presence
|
||||
//
|
||||
// Trwxrwxrwx++ uid/gid size mtime filename [-> destination]
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mode = meta.stat.mode;
|
||||
let type_char = if entry.is_hardlink() {
|
||||
'L'
|
||||
} else {
|
||||
match mode & mode::IFMT {
|
||||
mode::IFREG => '-',
|
||||
mode::IFBLK => 'b',
|
||||
mode::IFCHR => 'c',
|
||||
mode::IFDIR => 'd',
|
||||
mode::IFLNK => 'l',
|
||||
mode::IFIFO => 'p',
|
||||
mode::IFSOCK => 's',
|
||||
_ => '?',
|
||||
}
|
||||
};
|
||||
|
||||
let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
|
||||
let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
|
||||
let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');
|
||||
|
||||
let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };
|
||||
|
||||
let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };
|
||||
|
||||
format!(
|
||||
"{}{}{}{}{}{}",
|
||||
type_char,
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_u) },
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_g) },
|
||||
unsafe { std::str::from_utf8_unchecked(&fmt_o) },
|
||||
has_acls,
|
||||
has_xattrs,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn format_single_line_entry(entry: &Entry) -> String {
|
||||
use chrono::offset::TimeZone;
|
||||
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mtime = meta.mtime_as_duration();
|
||||
let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
|
||||
|
||||
let (size, link) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
|
||||
EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
|
||||
EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
|
||||
EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
|
||||
_ => ("0".to_string(), String::new()),
|
||||
};
|
||||
|
||||
format!(
|
||||
"{} {:<13} {} {:>8} {:?}{}",
|
||||
mode_string,
|
||||
format!("{}/{}", meta.stat.uid, meta.stat.gid),
|
||||
mtime.format("%Y-%m-%d %H:%M:%S"),
|
||||
size,
|
||||
entry.path(),
|
||||
link,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn format_multi_line_entry(entry: &Entry) -> String {
|
||||
use chrono::offset::TimeZone;
|
||||
|
||||
let mode_string = mode_string(entry);
|
||||
|
||||
let meta = entry.metadata();
|
||||
let mtime = meta.mtime_as_duration();
|
||||
let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());
|
||||
|
||||
let (size, link, type_name) = match entry.kind() {
|
||||
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
|
||||
EntryKind::Symlink(link) => (
|
||||
"0".to_string(),
|
||||
format!(" -> {:?}", link.as_os_str()),
|
||||
"symlink",
|
||||
),
|
||||
EntryKind::Hardlink(link) => (
|
||||
"0".to_string(),
|
||||
format!(" -> {:?}", link.as_os_str()),
|
||||
"symlink",
|
||||
),
|
||||
EntryKind::Device(dev) => (
|
||||
format!("{},{}", dev.major, dev.minor),
|
||||
String::new(),
|
||||
if meta.stat.is_chardev() {
|
||||
"characters pecial file"
|
||||
} else if meta.stat.is_blockdev() {
|
||||
"block special file"
|
||||
} else {
|
||||
"device"
|
||||
},
|
||||
),
|
||||
EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
|
||||
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
|
||||
EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
|
||||
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
|
||||
};
|
||||
|
||||
let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
|
||||
Ok(name) => std::borrow::Cow::Borrowed(name),
|
||||
Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
|
||||
};
|
||||
|
||||
format!(
|
||||
" File: {}{}\n \
|
||||
Size: {:<13} Type: {}\n\
|
||||
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
|
||||
Modify: {}\n",
|
||||
file_name,
|
||||
link,
|
||||
size,
|
||||
type_name,
|
||||
meta.file_mode(),
|
||||
mode_string,
|
||||
meta.stat.uid,
|
||||
meta.stat.gid,
|
||||
mtime.format("%Y-%m-%d %H:%M:%S"),
|
||||
)
|
||||
}
|
@ -1,15 +1,14 @@
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use std::path::PathBuf;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{RwLock};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||
|
||||
use crate::api2::types::{RRDMode, RRDTimeFrameResolution};
|
||||
use crate::tools::epoch_now_f64;
|
||||
|
||||
use super::*;
|
||||
|
||||
@ -35,11 +34,6 @@ pub fn create_rrdb_dir() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn now() -> Result<f64, Error> {
|
||||
let time = SystemTime::now().duration_since(UNIX_EPOCH)?;
|
||||
Ok(time.as_secs_f64())
|
||||
}
|
||||
|
||||
pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<(), Error> {
|
||||
|
||||
let mut path = PathBuf::from(PBS_RRD_BASEDIR);
|
||||
@ -48,7 +42,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
|
||||
std::fs::create_dir_all(path.parent().unwrap())?;
|
||||
|
||||
let mut map = RRD_CACHE.write().unwrap();
|
||||
let now = now()?;
|
||||
let now = epoch_now_f64()?;
|
||||
|
||||
if let Some(rrd) = map.get_mut(rel_path) {
|
||||
rrd.update(now, value);
|
||||
@ -71,41 +65,18 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_data(
|
||||
pub fn extract_cached_data(
|
||||
base: &str,
|
||||
items: &[&str],
|
||||
name: &str,
|
||||
now: f64,
|
||||
timeframe: RRDTimeFrameResolution,
|
||||
mode: RRDMode,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let now = now()?;
|
||||
) -> Option<(u64, u64, Vec<Option<f64>>)> {
|
||||
|
||||
let map = RRD_CACHE.read().unwrap();
|
||||
|
||||
let mut result = Vec::new();
|
||||
|
||||
for name in items.iter() {
|
||||
let rrd = match map.get(&format!("{}/{}", base, name)) {
|
||||
Some(rrd) => rrd,
|
||||
None => continue,
|
||||
};
|
||||
let (start, reso, list) = rrd.extract_data(now, timeframe, mode);
|
||||
let mut t = start;
|
||||
for index in 0..RRD_DATA_ENTRIES {
|
||||
if result.len() <= index {
|
||||
if let Some(value) = list[index] {
|
||||
result.push(json!({ "time": t, *name: value }));
|
||||
} else {
|
||||
result.push(json!({ "time": t }));
|
||||
match map.get(&format!("{}/{}", base, name)) {
|
||||
Some(rrd) => Some(rrd.extract_data(now, timeframe, mode)),
|
||||
None => None,
|
||||
}
|
||||
} else {
|
||||
if let Some(value) = list[index] {
|
||||
result[index][name] = value.into();
|
||||
}
|
||||
}
|
||||
t += reso;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.into())
|
||||
}
|
||||
|
@ -323,7 +323,7 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb
|
||||
|
||||
if let Some(query_str) = parts.uri.query() {
|
||||
for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
|
||||
if k == "debug" && v == "1" || v == "true" {
|
||||
if k == "debug" && v != "0" && v != "false" {
|
||||
debug = true;
|
||||
}
|
||||
}
|
||||
|
@ -213,6 +213,8 @@ pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
|
||||
Some(rest) => {
|
||||
if rest == "OK" {
|
||||
status = String::from(rest);
|
||||
} else if rest.starts_with("WARNINGS: ") {
|
||||
status = String::from(rest);
|
||||
} else if rest.starts_with("ERROR: ") {
|
||||
status = String::from(&rest[7..]);
|
||||
}
|
||||
@ -234,7 +236,7 @@ pub struct TaskListInfo {
|
||||
pub upid_str: String,
|
||||
/// Task `(endtime, status)` if already finished
|
||||
///
|
||||
/// The `status` ise iether `unknown`, `OK`, or `ERROR: ...`
|
||||
/// The `status` is either `unknown`, `OK`, `WARN`, or `ERROR: ...`
|
||||
pub state: Option<(i64, String)>, // endtime, status
|
||||
}
|
||||
|
||||
@ -385,6 +387,7 @@ impl std::fmt::Display for WorkerTask {
|
||||
struct WorkerTaskData {
|
||||
logger: FileLogger,
|
||||
progress: f64, // 0..1
|
||||
warn_count: u64,
|
||||
pub abort_listeners: Vec<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
@ -424,6 +427,7 @@ impl WorkerTask {
|
||||
data: Mutex::new(WorkerTaskData {
|
||||
logger,
|
||||
progress: 0.0,
|
||||
warn_count: 0,
|
||||
abort_listeners: vec![],
|
||||
}),
|
||||
});
|
||||
@ -507,8 +511,11 @@ impl WorkerTask {
|
||||
/// Log task result, remove task from running list
|
||||
pub fn log_result(&self, result: &Result<(), Error>) {
|
||||
|
||||
let warn_count = self.data.lock().unwrap().warn_count;
|
||||
if let Err(err) = result {
|
||||
self.log(&format!("TASK ERROR: {}", err));
|
||||
} else if warn_count > 0 {
|
||||
self.log(format!("TASK WARNINGS: {}", warn_count));
|
||||
} else {
|
||||
self.log("TASK OK");
|
||||
}
|
||||
@ -524,6 +531,13 @@ impl WorkerTask {
|
||||
data.logger.log(msg);
|
||||
}
|
||||
|
||||
/// Log a message as a warning.
|
||||
pub fn warn<S: AsRef<str>>(&self, msg: S) {
|
||||
let mut data = self.data.lock().unwrap();
|
||||
data.logger.log(format!("WARN: {}", msg.as_ref()));
|
||||
data.warn_count += 1;
|
||||
}
|
||||
|
||||
/// Set progress indicator
|
||||
pub fn progress(&self, progress: f64) {
|
||||
if progress >= 0.0 && progress <= 1.0 {
|
||||
|
82
src/tools.rs
@ -5,11 +5,11 @@ use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::BuildHasher;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::ErrorKind;
|
||||
use std::io::Read;
|
||||
use std::io::{self, BufRead, ErrorKind, Read};
|
||||
use std::os::unix::io::{AsRawFd, RawFd};
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
@ -31,7 +31,9 @@ pub mod lru_cache;
|
||||
pub mod runtime;
|
||||
pub mod ticket;
|
||||
pub mod timer;
|
||||
pub mod statistics;
|
||||
pub mod systemd;
|
||||
pub mod nom;
|
||||
|
||||
mod wrapped_reader_stream;
|
||||
pub use wrapped_reader_stream::*;
|
||||
@ -480,7 +482,7 @@ pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {
|
||||
/// is considered successful.
|
||||
pub fn command_output(
|
||||
output: std::process::Output,
|
||||
exit_code_check: Option<fn(i32) -> bool>
|
||||
exit_code_check: Option<fn(i32) -> bool>,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
if !output.status.success() {
|
||||
@ -507,6 +509,19 @@ pub fn command_output(
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
pub fn run_command(
|
||||
mut command: std::process::Command,
|
||||
exit_code_check: Option<fn(i32) -> bool>,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let output = command.output()
|
||||
.map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
|
||||
|
||||
let output = crate::tools::command_output(output, exit_code_check)
|
||||
.map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
|
||||
|
||||
Ok(output)
|
||||
}
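// Illustrative sketch, not part of this patch: running a command where exit
// code 1 is also acceptable. The command and arguments are only an example.
#[allow(dead_code)]
fn example_run_command() -> Result<String, Error> {
    let mut command = std::process::Command::new("grep");
    command.args(&["-c", "^root:", "/etc/passwd"]);
    // grep exits with 1 when nothing matches; treat that as success here
    run_command(command, Some(|code: i32| code == 0 || code == 1))
}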
|
||||
|
||||
pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
|
||||
use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
|
||||
@ -538,12 +553,27 @@ pub fn fail_on_shutdown() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
|
||||
/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
|
||||
/// descriptors.
|
||||
pub fn pipe() -> Result<(Fd, Fd), Error> {
|
||||
let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
|
||||
Ok((Fd(pin), Fd(pout)))
|
||||
}
|
||||
|
||||
/// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
|
||||
/// descriptors.
|
||||
pub fn socketpair() -> Result<(Fd, Fd), Error> {
|
||||
use nix::sys::socket;
|
||||
let (pa, pb) = socket::socketpair(
|
||||
socket::AddressFamily::Unix,
|
||||
socket::SockType::Stream,
|
||||
None,
|
||||
socket::SockFlag::SOCK_CLOEXEC,
|
||||
)?;
|
||||
Ok((Fd(pa), Fd(pb)))
|
||||
}
|
||||
|
||||
|
||||
/// An easy way to convert types to Any
|
||||
///
|
||||
/// Mostly useful to downcast trait objects (see RpcEnvironment).
|
||||
@ -572,3 +602,47 @@ pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f a
|
||||
.add(b'?')
|
||||
.add(b'{')
|
||||
.add(b'}');
|
||||
|
||||
/// Get an iterator over lines of a file, skipping empty lines and comments (lines starting with a
|
||||
/// `#`).
|
||||
pub fn file_get_non_comment_lines<P: AsRef<Path>>(
|
||||
path: P,
|
||||
) -> Result<impl Iterator<Item = io::Result<String>>, Error> {
|
||||
let path = path.as_ref();
|
||||
|
||||
Ok(io::BufReader::new(
|
||||
File::open(path).map_err(|err| format_err!("error opening {:?}: {}", path, err))?,
|
||||
)
|
||||
.lines()
|
||||
.filter_map(|line| match line {
|
||||
Ok(line) => {
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
None
|
||||
} else {
|
||||
Some(Ok(line.to_string()))
|
||||
}
|
||||
}
|
||||
Err(err) => Some(Err(err)),
|
||||
}))
|
||||
}
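// Illustrative sketch, not part of this patch: iterating a config-style file
// while skipping blank lines and `#` comments. The path is an assumption.
#[allow(dead_code)]
fn example_non_comment_lines() -> Result<(), Error> {
    for line in file_get_non_comment_lines("/etc/fstab")? {
        // each item is an io::Result<String>, since reading may still fail mid-file
        println!("{}", line?);
    }
    Ok(())
}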
|
||||
|
||||
pub fn epoch_now() -> Result<Duration, SystemTimeError> {
|
||||
SystemTime::now().duration_since(UNIX_EPOCH)
|
||||
}
|
||||
|
||||
pub fn epoch_now_f64() -> Result<f64, SystemTimeError> {
|
||||
Ok(epoch_now()?.as_secs_f64())
|
||||
}
|
||||
|
||||
pub fn epoch_now_u64() -> Result<u64, SystemTimeError> {
|
||||
Ok(epoch_now()?.as_secs())
|
||||
}
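// Illustrative sketch, not part of this patch: the three helpers expose the
// same instant as a Duration, fractional seconds, or whole seconds.
#[allow(dead_code)]
fn example_epoch_now() -> Result<(), SystemTimeError> {
    let _as_duration: Duration = epoch_now()?;
    let _as_f64: f64 = epoch_now_f64()?;
    let _as_u64: u64 = epoch_now_u64()?;
    Ok(())
}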
|
||||
|
||||
pub fn setup_safe_path_env() {
|
||||
std::env::set_var("PATH", "/sbin:/bin:/usr/sbin:/usr/bin");
|
||||
// Make %ENV safer - as suggested by https://perldoc.perl.org/perlsec.html
|
||||
for name in &["IFS", "CDPATH", "ENV", "BASH_ENV"] {
|
||||
std::env::remove_var(name);
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ use std::ptr;
|
||||
|
||||
use libc::{c_char, c_int, c_uint, c_void};
|
||||
use nix::errno::Errno;
|
||||
use nix::NixPath;
|
||||
|
||||
// from: acl/include/acl.h
|
||||
pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
|
||||
@ -100,14 +101,11 @@ impl ACL {
|
||||
Ok(ACL { ptr })
|
||||
}
|
||||
|
||||
pub fn set_file<P: AsRef<Path>>(&self, path: P, acl_type: ACLType) -> Result<(), nix::errno::Errno> {
|
||||
let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
|
||||
let res = unsafe { acl_set_file(path_cstr.as_ptr(), acl_type, self.ptr) };
|
||||
if res < 0 {
|
||||
return Err(Errno::last());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
|
||||
path.with_nix_path(|path| {
|
||||
Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
|
||||
})?
|
||||
.map(drop)
|
||||
}
|
||||
|
||||
pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
|
||||
|
@ -2,12 +2,14 @@
|
||||
|
||||
use std::ffi::CString;
|
||||
use std::future::Future;
|
||||
use std::os::raw::{c_char, c_int};
|
||||
use std::io::{Read, Write};
|
||||
use std::os::raw::{c_char, c_uchar, c_int};
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::panic::UnwindSafe;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
@ -16,6 +18,11 @@ use proxmox::tools::io::{ReadExt, WriteExt};
|
||||
use crate::server;
|
||||
use crate::tools::{fd_change_cloexec, self};
|
||||
|
||||
#[link(name = "systemd")]
|
||||
extern "C" {
|
||||
fn sd_journal_stream_fd(identifier: *const c_uchar, priority: c_int, level_prefix: c_int) -> c_int;
|
||||
}
|
||||
|
||||
// Unfortunately FnBox is nightly-only and Box<FnOnce> is unusable, so just use Box<Fn>...
|
||||
pub type BoxedStoreFunc = Box<dyn FnMut() -> Result<String, Error> + UnwindSafe + Send>;
|
||||
|
||||
@ -31,6 +38,7 @@ pub trait Reloadable: Sized {
|
||||
#[derive(Default)]
|
||||
pub struct Reloader {
|
||||
pre_exec: Vec<PreExecEntry>,
|
||||
self_exe: PathBuf,
|
||||
}
|
||||
|
||||
// Currently we only need environment variables for storage, but in theory we could also add
|
||||
@ -41,10 +49,13 @@ struct PreExecEntry {
|
||||
}
|
||||
|
||||
impl Reloader {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
pub fn new() -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
pre_exec: Vec::new(),
|
||||
}
|
||||
|
||||
// Get the path to our executable as PathBuf
|
||||
self_exe: std::fs::read_link("/proc/self/exe")?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Restore an object from an environment variable of the given name, or, if none exists, uses
|
||||
@ -78,13 +89,6 @@ impl Reloader {
|
||||
}
|
||||
|
||||
pub fn fork_restart(self) -> Result<(), Error> {
|
||||
// Get the path to our executable as CString
|
||||
let exe = CString::new(
|
||||
std::fs::read_link("/proc/self/exe")?
|
||||
.into_os_string()
|
||||
.as_bytes()
|
||||
)?;
|
||||
|
||||
// Get our parameters as Vec<CString>
|
||||
let args = std::env::args_os();
|
||||
let mut new_args = Vec::with_capacity(args.len());
|
||||
@ -93,7 +97,7 @@ impl Reloader {
|
||||
}
|
||||
|
||||
// Synchronisation pipe:
|
||||
let (pin, pout) = super::pipe()?;
|
||||
let (pold, pnew) = super::socketpair()?;
|
||||
|
||||
// Start ourselves in the background:
|
||||
use nix::unistd::{fork, ForkResult};
|
||||
@ -102,30 +106,60 @@ impl Reloader {
|
||||
// Double fork so systemd can supervise us without nagging...
|
||||
match fork() {
|
||||
Ok(ForkResult::Child) => {
|
||||
std::mem::drop(pin);
|
||||
std::mem::drop(pold);
|
||||
// At this point we call pre-exec helpers. We must be certain that if they fail for
|
||||
// whatever reason we can still call `_exit()`, so use catch_unwind.
|
||||
match std::panic::catch_unwind(move || {
|
||||
let mut pout = unsafe {
|
||||
std::fs::File::from_raw_fd(pout.into_raw_fd())
|
||||
let mut pnew = unsafe {
|
||||
std::fs::File::from_raw_fd(pnew.into_raw_fd())
|
||||
};
|
||||
let pid = nix::unistd::Pid::this();
|
||||
if let Err(e) = unsafe { pout.write_host_value(pid.as_raw()) } {
|
||||
if let Err(e) = unsafe { pnew.write_host_value(pid.as_raw()) } {
|
||||
log::error!("failed to send new server PID to parent: {}", e);
|
||||
unsafe {
|
||||
libc::_exit(-1);
|
||||
}
|
||||
}
|
||||
std::mem::drop(pout);
|
||||
self.do_exec(exe, new_args)
|
||||
|
||||
let mut ok = [0u8];
|
||||
if let Err(e) = pnew.read_exact(&mut ok) {
|
||||
log::error!("parent vanished before notifying systemd: {}", e);
|
||||
unsafe {
|
||||
libc::_exit(-1);
|
||||
}
|
||||
}
|
||||
assert_eq!(ok[0], 1, "reload handshake should have sent a 1 byte");
|
||||
|
||||
std::mem::drop(pnew);
|
||||
|
||||
// Try to reopen STDOUT/STDERR journald streams to get correct PID in logs
|
||||
let ident = CString::new(self.self_exe.file_name().unwrap().as_bytes()).unwrap();
|
||||
let ident = ident.as_bytes();
|
||||
let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_INFO, 1) };
|
||||
if fd >= 0 && fd != 1 {
|
||||
let fd = proxmox::tools::fd::Fd(fd); // add drop handler
|
||||
nix::unistd::dup2(fd.as_raw_fd(), 1)?;
|
||||
} else {
|
||||
log::error!("failed to update STDOUT journal redirection ({})", fd);
|
||||
}
|
||||
let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_ERR, 1) };
|
||||
if fd >= 0 && fd != 2 {
|
||||
let fd = proxmox::tools::fd::Fd(fd); // add drop handler
|
||||
nix::unistd::dup2(fd.as_raw_fd(), 2)?;
|
||||
} else {
|
||||
log::error!("failed to update STDERR journal redirection ({})", fd);
|
||||
}
|
||||
|
||||
self.do_reexec(new_args)
|
||||
})
|
||||
{
|
||||
Ok(_) => eprintln!("do_exec returned unexpectedly!"),
|
||||
Ok(Ok(())) => eprintln!("do_reexec returned!"),
|
||||
Ok(Err(err)) => eprintln!("do_reexec failed: {}", err),
|
||||
Err(_) => eprintln!("panic in re-exec"),
|
||||
}
|
||||
}
|
||||
Ok(ForkResult::Parent { child }) => {
|
||||
std::mem::drop((pin, pout));
|
||||
std::mem::drop((pold, pnew));
|
||||
log::debug!("forked off a new server (second pid: {})", child);
|
||||
}
|
||||
Err(e) => log::error!("fork() failed, restart delayed: {}", e),
|
||||
@ -137,11 +171,11 @@ impl Reloader {
|
||||
}
|
||||
Ok(ForkResult::Parent { child }) => {
|
||||
log::debug!("forked off a new server (first pid: {}), waiting for 2nd pid", child);
|
||||
std::mem::drop(pout);
|
||||
let mut pin = unsafe {
|
||||
std::fs::File::from_raw_fd(pin.into_raw_fd())
|
||||
std::mem::drop(pnew);
|
||||
let mut pold = unsafe {
|
||||
std::fs::File::from_raw_fd(pold.into_raw_fd())
|
||||
};
|
||||
let child = nix::unistd::Pid::from_raw(match unsafe { pin.read_le_value() } {
|
||||
let child = nix::unistd::Pid::from_raw(match unsafe { pold.read_le_value() } {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log::error!("failed to receive pid of double-forked child process: {}", e);
|
||||
@ -153,6 +187,12 @@ impl Reloader {
|
||||
if let Err(e) = systemd_notify(SystemdNotify::MainPid(child)) {
|
||||
log::error!("failed to notify systemd about the new main pid: {}", e);
|
||||
}
|
||||
|
||||
// notify child that it is now the new main process:
|
||||
if let Err(e) = pold.write_all(&[1u8]) {
|
||||
log::error!("child vanished during reload: {}", e);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
@ -162,12 +202,13 @@ impl Reloader {
|
||||
}
|
||||
}
|
||||
|
||||
fn do_exec(self, exe: CString, args: Vec<CString>) -> Result<(), Error> {
|
||||
fn do_reexec(self, args: Vec<CString>) -> Result<(), Error> {
|
||||
let exe = CString::new(self.self_exe.as_os_str().as_bytes())?;
|
||||
self.pre_exec()?;
|
||||
nix::unistd::setsid()?;
|
||||
let args: Vec<&std::ffi::CStr> = args.iter().map(|s| s.as_ref()).collect();
|
||||
nix::unistd::execvp(&exe, &args)?;
|
||||
Ok(())
|
||||
panic!("exec misbehaved");
|
||||
}
|
||||
}
|
||||
|
||||
@ -223,7 +264,7 @@ where
|
||||
F: FnOnce(tokio::net::TcpListener, NotifyReady) -> Result<S, Error>,
|
||||
S: Future<Output = ()>,
|
||||
{
|
||||
let mut reloader = Reloader::new();
|
||||
let mut reloader = Reloader::new()?;
|
||||
|
||||
let listener: tokio::net::TcpListener = reloader.restore(
|
||||
"PROXMOX_BACKUP_LISTEN_FD",
|
||||
|
@ -4,44 +4,37 @@ use std::collections::{HashMap, HashSet};
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::io;
|
||||
use std::os::unix::ffi::{OsStrExt, OsStringExt};
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use bitflags::bitflags;
|
||||
use anyhow::{format_err, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use libc::dev_t;
|
||||
use once_cell::sync::OnceCell;
|
||||
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::sys::error::io_err_other;
|
||||
use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
|
||||
use proxmox::{io_bail, io_format_err};
|
||||
use proxmox::api::api;
|
||||
|
||||
use crate::api2::types::{BLOCKDEVICE_NAME_REGEX, StorageStatus};
|
||||
|
||||
mod zfs;
|
||||
pub use zfs::*;
|
||||
mod zpool_status;
|
||||
pub use zpool_status::*;
|
||||
mod zpool_list;
|
||||
pub use zpool_list::*;
|
||||
mod lvm;
|
||||
pub use lvm::*;
|
||||
mod smart;
|
||||
pub use smart::*;
|
||||
|
||||
bitflags! {
|
||||
/// Ways a device is being used.
|
||||
pub struct DiskUse: u32 {
|
||||
/// Currently mounted.
|
||||
const MOUNTED = 0x0000_0001;
|
||||
|
||||
/// Currently used as member of a device-mapper device.
|
||||
const DEVICE_MAPPER = 0x0000_0002;
|
||||
|
||||
/// Contains partitions.
|
||||
const PARTITIONS = 0x0001_0000;
|
||||
|
||||
/// The disk has a partition type which belongs to an LVM PV.
|
||||
const LVM = 0x0002_0000;
|
||||
|
||||
/// The disk has a partition type which belongs to a zpool.
|
||||
const ZFS = 0x0004_0000;
|
||||
|
||||
/// The disk is used by ceph.
|
||||
const CEPH = 0x0008_0000;
|
||||
}
|
||||
lazy_static::lazy_static!{
|
||||
static ref ISCSI_PATH_REGEX: regex::Regex =
|
||||
regex::Regex::new(r"host[^/]*/session[^/]*").unwrap();
|
||||
}
|
||||
|
||||
/// Disk management context.
|
||||
@ -70,8 +63,6 @@ impl DiskManage {
|
||||
|
||||
/// Get a `Disk` from a device node (eg. `/dev/sda`).
|
||||
pub fn disk_by_node<P: AsRef<Path>>(self: Arc<Self>, devnode: P) -> io::Result<Disk> {
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
let devnode = devnode.as_ref();
|
||||
|
||||
let meta = std::fs::metadata(devnode)?;
|
||||
@ -101,10 +92,14 @@ impl DiskManage {
|
||||
})
|
||||
}
|
||||
|
||||
/// Get a `Disk` for a name in `/sys/block/<name>`.
|
||||
pub fn disk_by_name(self: Arc<Self>, name: &str) -> io::Result<Disk> {
|
||||
let syspath = format!("/sys/block/{}", name);
|
||||
self.disk_by_sys_path(&syspath)
|
||||
}
|
||||
|
||||
/// Gather information about mounted disks:
|
||||
fn mounted_devices(&self) -> Result<&HashSet<dev_t>, Error> {
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
self.mounted_devices
|
||||
.get_or_try_init(|| -> Result<_, Error> {
|
||||
let mut mounted = HashSet::new();
|
||||
@ -264,12 +259,17 @@ impl Disk {
|
||||
}
|
||||
|
||||
/// Convenience wrapper for reading a `/sys` file which contains just a simple `OsString`.
|
||||
fn read_sys_os_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<OsString>> {
|
||||
Ok(self.read_sys(path.as_ref())?.map(OsString::from_vec))
|
||||
pub fn read_sys_os_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<OsString>> {
|
||||
Ok(self.read_sys(path.as_ref())?.map(|mut v| {
|
||||
if Some(&b'\n') == v.last() {
|
||||
v.pop();
|
||||
}
|
||||
OsString::from_vec(v)
|
||||
}))
|
||||
}
|
||||
|
||||
/// Convenience wrapper for reading a `/sys` file which contains just a simple utf-8 string.
|
||||
fn read_sys_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<String>> {
|
||||
pub fn read_sys_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<String>> {
|
||||
Ok(match self.read_sys(path.as_ref())? {
|
||||
Some(data) => Some(String::from_utf8(data).map_err(io_err_other)?),
|
||||
None => None,
|
||||
@ -277,7 +277,7 @@ impl Disk {
|
||||
}
|
||||
|
||||
/// Convenience wrapper for unsigned integer `/sys` values up to 64 bit.
|
||||
fn read_sys_u64<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<u64>> {
|
||||
pub fn read_sys_u64<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<u64>> {
|
||||
Ok(match self.read_sys_str(path)? {
|
||||
Some(data) => Some(data.trim().parse().map_err(io_err_other)?),
|
||||
None => None,
|
||||
@ -287,7 +287,7 @@ impl Disk {
|
||||
/// Get the disk's size in bytes.
|
||||
pub fn size(&self) -> io::Result<u64> {
|
||||
Ok(*self.info.size.get_or_try_init(|| {
|
||||
self.read_sys_u64("size")?.ok_or_else(|| {
|
||||
self.read_sys_u64("size")?.map(|s| s*512).ok_or_else(|| {
|
||||
io_format_err!(
|
||||
"failed to get disk size from {:?}",
|
||||
self.syspath().join("size"),
|
||||
@ -400,8 +400,9 @@ impl Disk {
|
||||
/// Attempt to guess the disk type.
|
||||
pub fn guess_disk_type(&self) -> io::Result<DiskType> {
|
||||
Ok(match self.rotational()? {
|
||||
Some(false) => DiskType::Ssd,
|
||||
Some(true) => DiskType::Hdd,
|
||||
_ => match self.ata_rotation_rate_rpm() {
|
||||
None => match self.ata_rotation_rate_rpm() {
|
||||
Some(_) => DiskType::Hdd,
|
||||
None => match self.bus() {
|
||||
Some(bus) if bus == "usb" => DiskType::Usb,
|
||||
@ -433,7 +434,9 @@ impl Disk {
|
||||
.info
|
||||
.has_holders
|
||||
.get_or_try_init(|| -> io::Result<bool> {
|
||||
for entry in std::fs::read_dir(self.syspath())? {
|
||||
let mut subdir = self.syspath().to_owned();
|
||||
subdir.push("holders");
|
||||
for entry in std::fs::read_dir(subdir)? {
|
||||
match entry?.file_name().as_bytes() {
|
||||
b"." | b".." => (),
|
||||
_ => return Ok(true),
|
||||
@ -473,10 +476,40 @@ impl Disk {
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// List device partitions
|
||||
pub fn partitions(&self) -> Result<HashMap<u64, Disk>, Error> {
|
||||
|
||||
let sys_path = self.syspath();
|
||||
let device = self.sysname().to_string_lossy().to_string();
|
||||
|
||||
let mut map = HashMap::new();
|
||||
|
||||
for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
|
||||
let item = item?;
|
||||
let name = match item.file_name().to_str() {
|
||||
Ok(name) => name,
|
||||
Err(_) => continue, // skip non utf8 entries
|
||||
};
|
||||
|
||||
if !name.starts_with(&device) { continue; }
|
||||
|
||||
let mut part_path = sys_path.to_owned();
|
||||
part_path.push(name);
|
||||
|
||||
let disk_part = self.manager.clone().disk_by_sys_path(&part_path)?;
|
||||
|
||||
if let Some(partition) = disk_part.read_sys_u64("partition")? {
|
||||
map.insert(partition, disk_part);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns disk usage information (total, used, avail)
|
||||
pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
|
||||
pub fn disk_usage(path: &std::path::Path) -> Result<StorageStatus, Error> {
|
||||
|
||||
let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
|
||||
|
||||
@ -487,9 +520,16 @@ pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
|
||||
|
||||
let bsize = stat.f_bsize as u64;
|
||||
|
||||
Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
|
||||
Ok(StorageStatus{
|
||||
total: stat.f_blocks*bsize,
|
||||
used: (stat.f_blocks-stat.f_bfree)*bsize,
|
||||
avail: stat.f_bavail*bsize,
|
||||
})
|
||||
}
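// Illustrative only: a hedged sketch of how the new StorageStatus return value
// might be consumed, assuming its total/used/avail fields (shown in the struct
// literal above) are accessible to the caller. Function name and path are made up.
fn print_root_usage() -> Result<(), Error> {
    let status = disk_usage(std::path::Path::new("/"))?;
    println!("total: {} used: {} avail: {}", status.total, status.used, status.avail);
    Ok(())
}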
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="lowercase")]
|
||||
/// This is just a rough estimate for a "type" of disk.
|
||||
pub enum DiskType {
|
||||
/// We know nothing.
|
||||
@ -518,16 +558,10 @@ pub struct BlockDevStat {
|
||||
/// Use lsblk to read partition type uuids.
|
||||
pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error> {
|
||||
|
||||
const LSBLK_BIN_PATH: &str = "/usr/bin/lsblk";
|
||||
|
||||
let mut command = std::process::Command::new(LSBLK_BIN_PATH);
|
||||
let mut command = std::process::Command::new("lsblk");
|
||||
command.args(&["--json", "-o", "path,parttype"]);
|
||||
|
||||
let output = command.output()
|
||||
.map_err(|err| format_err!("failed to execute '{}' - {}", LSBLK_BIN_PATH, err))?;
|
||||
|
||||
let output = crate::tools::command_output(output, None)
|
||||
.map_err(|err| format_err!("lsblk command failed: {}", err))?;
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
let mut res: HashMap<String, Vec<String>> = HashMap::new();
|
||||
|
||||
@ -553,3 +587,412 @@ pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error>
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all="lowercase")]
|
||||
pub enum DiskUsageType {
|
||||
/// Disk is not used (as far we can tell)
|
||||
Unused,
|
||||
/// Disk is mounted
|
||||
Mounted,
|
||||
/// Disk is used by LVM
|
||||
LVM,
|
||||
/// Disk is used by ZFS
|
||||
ZFS,
|
||||
/// Disk is used by device-mapper
|
||||
DeviceMapper,
|
||||
/// Disk has partitions
|
||||
Partitions,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
used: {
|
||||
type: DiskUsageType,
|
||||
},
|
||||
"disk-type": {
|
||||
type: DiskType,
|
||||
},
|
||||
status: {
|
||||
type: SmartStatus,
|
||||
}
|
||||
}
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// Information about how a Disk is used
|
||||
pub struct DiskUsageInfo {
|
||||
/// Disk name (/sys/block/<name>)
|
||||
pub name: String,
|
||||
pub used: DiskUsageType,
|
||||
pub disk_type: DiskType,
|
||||
pub status: SmartStatus,
|
||||
/// Disk wearout
|
||||
pub wearout: Option<f64>,
|
||||
/// Vendor
|
||||
pub vendor: Option<String>,
|
||||
/// Model
|
||||
pub model: Option<String>,
|
||||
/// WWN
|
||||
pub wwn: Option<String>,
|
||||
/// Disk size
|
||||
pub size: u64,
|
||||
/// Serial number
|
||||
pub serial: Option<String>,
|
||||
/// Linux device path (/dev/xxx)
|
||||
pub devpath: Option<String>,
|
||||
/// Set if disk contains a GPT partition table
|
||||
pub gpt: bool,
|
||||
/// RPM
|
||||
pub rpm: Option<u64>,
|
||||
}
|
||||
|
||||
fn scan_partitions(
|
||||
disk_manager: Arc<DiskManage>,
|
||||
lvm_devices: &HashSet<u64>,
|
||||
zfs_devices: &HashSet<u64>,
|
||||
device: &str,
|
||||
) -> Result<DiskUsageType, Error> {
|
||||
|
||||
let mut sys_path = std::path::PathBuf::from("/sys/block");
|
||||
sys_path.push(device);
|
||||
|
||||
let mut used = DiskUsageType::Unused;
|
||||
|
||||
let mut found_lvm = false;
|
||||
let mut found_zfs = false;
|
||||
let mut found_mountpoints = false;
|
||||
let mut found_dm = false;
|
||||
let mut found_partitions = false;
|
||||
|
||||
for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
|
||||
let item = item?;
|
||||
let name = match item.file_name().to_str() {
|
||||
Ok(name) => name,
|
||||
Err(_) => continue, // skip non utf8 entries
|
||||
};
|
||||
if !name.starts_with(device) { continue; }
|
||||
|
||||
found_partitions = true;
|
||||
|
||||
let mut part_path = sys_path.clone();
|
||||
part_path.push(name);
|
||||
|
||||
let data = disk_manager.clone().disk_by_sys_path(&part_path)?;
|
||||
|
||||
let devnum = data.devnum()?;
|
||||
|
||||
if lvm_devices.contains(&devnum) {
|
||||
found_lvm = true;
|
||||
}
|
||||
|
||||
if data.is_mounted()? {
|
||||
found_mountpoints = true;
|
||||
}
|
||||
|
||||
if data.has_holders()? {
|
||||
found_dm = true;
|
||||
}
|
||||
|
||||
if zfs_devices.contains(&devnum) {
|
||||
found_zfs = true;
|
||||
}
|
||||
}
|
||||
|
||||
if found_mountpoints {
|
||||
used = DiskUsageType::Mounted;
|
||||
} else if found_lvm {
|
||||
used = DiskUsageType::LVM;
|
||||
} else if found_zfs {
|
||||
used = DiskUsageType::ZFS;
|
||||
} else if found_dm {
|
||||
used = DiskUsageType::DeviceMapper;
|
||||
} else if found_partitions {
|
||||
used = DiskUsageType::Partitions;
|
||||
}
|
||||
|
||||
Ok(used)
|
||||
}
|
||||
|
||||
|
||||
/// Get disk usage information for a single disk
|
||||
pub fn get_disk_usage_info(
|
||||
disk: &str,
|
||||
no_smart: bool,
|
||||
) -> Result<DiskUsageInfo, Error> {
|
||||
let mut filter = Vec::new();
|
||||
filter.push(disk.to_string());
|
||||
let mut map = get_disks(Some(filter), no_smart)?;
|
||||
if let Some(info) = map.remove(disk) {
|
||||
return Ok(info);
|
||||
} else {
|
||||
bail!("failed to get disk usage info - internal error"); // should not happen
|
||||
}
|
||||
}
|
||||
|
||||
/// Get disk usage information for multiple disks
|
||||
pub fn get_disks(
|
||||
// filter - list of device names (without leading /dev)
|
||||
disks: Option<Vec<String>>,
|
||||
// do not include data from smartctl
|
||||
no_smart: bool,
|
||||
) -> Result<HashMap<String, DiskUsageInfo>, Error> {
|
||||
|
||||
let disk_manager = DiskManage::new();
|
||||
|
||||
let partition_type_map = get_partition_type_info()?;
|
||||
|
||||
let zfs_devices = zfs_devices(&partition_type_map, None)?;
|
||||
|
||||
let lvm_devices = get_lvm_devices(&partition_type_map)?;
|
||||
|
||||
// fixme: ceph journals/volumes
|
||||
|
||||
let mut result = HashMap::new();
|
||||
|
||||
for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
|
||||
let item = item?;
|
||||
|
||||
let name = item.file_name().to_str().unwrap().to_string();
|
||||
|
||||
if let Some(ref disks) = disks {
|
||||
if !disks.contains(&name) { continue; }
|
||||
}
|
||||
|
||||
let sys_path = format!("/sys/block/{}", name);
|
||||
|
||||
if let Ok(target) = std::fs::read_link(&sys_path) {
|
||||
if let Some(target) = target.to_str() {
|
||||
if ISCSI_PATH_REGEX.is_match(target) { continue; } // skip iSCSI devices
|
||||
}
|
||||
}
|
||||
|
||||
let disk = disk_manager.clone().disk_by_sys_path(&sys_path)?;
|
||||
|
||||
let devnum = disk.devnum()?;
|
||||
|
||||
let size = match disk.size() {
|
||||
Ok(size) => size,
|
||||
Err(_) => continue, // skip devices with unreadable size
|
||||
};
|
||||
|
||||
let disk_type = match disk.guess_disk_type() {
|
||||
Ok(disk_type) => disk_type,
|
||||
Err(_) => continue, // skip devices with undetectable type
|
||||
};
|
||||
|
||||
let mut usage = DiskUsageType::Unused;
|
||||
|
||||
if lvm_devices.contains(&devnum) {
|
||||
usage = DiskUsageType::LVM;
|
||||
}
|
||||
|
||||
match disk.is_mounted() {
|
||||
Ok(true) => usage = DiskUsageType::Mounted,
|
||||
Ok(false) => {},
|
||||
Err(_) => continue, // skip devices with undetectable mount status
|
||||
}
|
||||
|
||||
if zfs_devices.contains(&devnum) {
|
||||
usage = DiskUsageType::ZFS;
|
||||
}
|
||||
|
||||
let vendor = disk.vendor().unwrap_or(None).
|
||||
map(|s| s.to_string_lossy().trim().to_string());
|
||||
|
||||
let model = disk.model().map(|s| s.to_string_lossy().into_owned());
|
||||
|
||||
let serial = disk.serial().map(|s| s.to_string_lossy().into_owned());
|
||||
|
||||
let devpath = disk.device_path().map(|p| p.to_owned())
|
||||
.map(|p| p.to_string_lossy().to_string());
|
||||
|
||||
|
||||
let wwn = disk.wwn().map(|s| s.to_string_lossy().into_owned());
|
||||
|
||||
if usage != DiskUsageType::Mounted {
|
||||
match scan_partitions(disk_manager.clone(), &lvm_devices, &zfs_devices, &name) {
|
||||
Ok(part_usage) => {
|
||||
if part_usage != DiskUsageType::Unused {
|
||||
usage = part_usage;
|
||||
}
|
||||
},
|
||||
Err(_) => continue, // skip devices if scan_partitions fail
|
||||
};
|
||||
}
|
||||
|
||||
let mut status = SmartStatus::Unknown;
|
||||
let mut wearout = None;
|
||||
|
||||
if !no_smart {
|
||||
if let Ok(smart) = get_smart_data(&disk, false) {
|
||||
status = smart.status;
|
||||
wearout = smart.wearout;
|
||||
}
|
||||
}
|
||||
|
||||
let info = DiskUsageInfo {
|
||||
name: name.clone(),
|
||||
vendor, model, serial, devpath, size, wwn, disk_type,
|
||||
status, wearout,
|
||||
used: usage,
|
||||
gpt: disk.has_gpt(),
|
||||
rpm: disk.ata_rotation_rate_rpm(),
|
||||
};
|
||||
|
||||
result.insert(name, info);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
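// Illustrative only: a hedged sketch of rendering the map returned by get_disks();
// print_disk_overview is made up, the field names come from DiskUsageInfo above.
fn print_disk_overview() -> Result<(), Error> {
    let disks = get_disks(None, true)?; // all disks, skip smartctl
    for (name, info) in disks {
        println!("{}: {:?}, {} bytes, usage {:?}", name, info.disk_type, info.size, info.used);
    }
    Ok(())
}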
|
||||
|
||||
/// Try to reload the partition table
|
||||
pub fn reread_partition_table(disk: &Disk) -> Result<(), Error> {
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
|
||||
let mut command = std::process::Command::new("blockdev");
|
||||
command.arg("--rereadpt");
|
||||
command.arg(disk_path);
|
||||
|
||||
crate::tools::run_command(command, None)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initialize disk by writing a GPT partition table
|
||||
pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Error> {
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
|
||||
let uuid = uuid.unwrap_or("R"); // R .. random disk GUID
|
||||
|
||||
let mut command = std::process::Command::new("sgdisk");
|
||||
command.arg(disk_path);
|
||||
command.args(&["-U", uuid]);
|
||||
|
||||
crate::tools::run_command(command, None)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a single linux partition using the whole available space
|
||||
pub fn create_single_linux_partition(disk: &Disk) -> Result<Disk, Error> {
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
|
||||
let mut command = std::process::Command::new("sgdisk");
|
||||
command.args(&["-n1", "-t1:8300"]);
|
||||
command.arg(disk_path);
|
||||
|
||||
crate::tools::run_command(command, None)?;
|
||||
|
||||
let mut partitions = disk.partitions()?;
|
||||
|
||||
match partitions.remove(&1) {
|
||||
Some(partition) => Ok(partition),
|
||||
None => bail!("unable to lookup device partition"),
|
||||
}
|
||||
}
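// Illustrative only: a hedged sketch of chaining the helpers above to prepare a
// blank disk - write a GPT, create one Linux partition, format it. This is not
// necessarily how the callers in this change use them; prepare_disk_ext4 is made up.
fn prepare_disk_ext4(disk: &Disk) -> Result<(), Error> {
    inititialize_gpt_disk(disk, None)?; // None => random disk GUID ("R")
    let partition = create_single_linux_partition(disk)?;
    create_file_system(&partition, FileSystemType::Ext4)?;
    Ok(())
}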
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all="lowercase")]
|
||||
pub enum FileSystemType {
|
||||
/// Linux Ext4
|
||||
Ext4,
|
||||
/// XFS
|
||||
Xfs,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for FileSystemType {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let text = match self {
|
||||
FileSystemType::Ext4 => "ext4",
|
||||
FileSystemType::Xfs => "xfs",
|
||||
};
|
||||
write!(f, "{}", text)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for FileSystemType {
|
||||
type Err = serde_json::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
use serde::de::IntoDeserializer;
|
||||
Self::deserialize(s.into_deserializer())
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a file system on a disk or disk partition
|
||||
pub fn create_file_system(disk: &Disk, fs_type: FileSystemType) -> Result<(), Error> {
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
|
||||
let fs_type = fs_type.to_string();
|
||||
|
||||
let mut command = std::process::Command::new("mkfs");
|
||||
command.args(&["-t", &fs_type]);
|
||||
command.arg(disk_path);
|
||||
|
||||
crate::tools::run_command(command, None)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Block device name completion helper
|
||||
pub fn complete_disk_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||
let mut list = Vec::new();
|
||||
|
||||
let dir = match crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
|
||||
Ok(dir) => dir,
|
||||
Err(_) => return list,
|
||||
};
|
||||
|
||||
for item in dir {
|
||||
if let Ok(item) = item {
|
||||
let name = item.file_name().to_str().unwrap().to_string();
|
||||
list.push(name);
|
||||
}
|
||||
}
|
||||
|
||||
list
|
||||
}
|
||||
|
||||
/// Read the FS UUID (parse blkid output)
|
||||
///
|
||||
/// Note: Calling blkid is more reliable than using the udev ID_FS_UUID property.
|
||||
pub fn get_fs_uuid(disk: &Disk) -> Result<String, Error> {
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
|
||||
let mut command = std::process::Command::new("blkid");
|
||||
command.args(&["-o", "export"]);
|
||||
command.arg(disk_path);
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
for line in output.lines() {
|
||||
if line.starts_with("UUID=") {
|
||||
return Ok(line[5..].to_string());
|
||||
}
|
||||
}
|
||||
|
||||
bail!("get_fs_uuid failed - missing UUID");
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use anyhow::{Error};
|
||||
use serde_json::Value;
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
@ -12,29 +13,28 @@ lazy_static!{
|
||||
};
|
||||
}
|
||||
|
||||
/// Get list of devices used by LVM (pvs).
|
||||
/// Get set of devices used by LVM (pvs).
|
||||
///
|
||||
/// The set is indexed by using the unix raw device number (dev_t is u64)
|
||||
pub fn get_lvm_devices(
|
||||
partition_type_map: &HashMap<String, Vec<String>>,
|
||||
) -> Result<HashSet<String>, Error> {
|
||||
) -> Result<HashSet<u64>, Error> {
|
||||
|
||||
const PVS_BIN_PATH: &str = "/sbin/pvs";
|
||||
const PVS_BIN_PATH: &str = "pvs";
|
||||
|
||||
let mut command = std::process::Command::new(PVS_BIN_PATH);
|
||||
command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);
|
||||
|
||||
let output = command.output()
|
||||
.map_err(|err| format_err!("failed to execute '{}' - {}", PVS_BIN_PATH, err))?;
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
let output = crate::tools::command_output(output, None)
|
||||
.map_err(|err| format_err!("pvs command failed: {}", err))?;
|
||||
|
||||
let mut device_set: HashSet<String> = HashSet::new();
|
||||
let mut device_set: HashSet<u64> = HashSet::new();
|
||||
|
||||
for device_list in partition_type_map.iter()
|
||||
.filter_map(|(uuid, list)| if LVM_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
|
||||
{
|
||||
for device in device_list {
|
||||
device_set.insert(device.clone());
|
||||
let meta = std::fs::metadata(device)?;
|
||||
device_set.insert(meta.rdev());
|
||||
}
|
||||
}
|
||||
|
||||
@ -44,7 +44,8 @@ pub fn get_lvm_devices(
|
||||
Some(list) => {
|
||||
for info in list {
|
||||
if let Some(pv_name) = info["pv_name"].as_str() {
|
||||
device_set.insert(pv_name.to_string());
|
||||
let meta = std::fs::metadata(pv_name)?;
|
||||
device_set.insert(meta.rdev());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
212
src/tools/disks/smart.rs
Normal file
@ -0,0 +1,212 @@
|
||||
use anyhow::{bail, Error};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="lowercase")]
|
||||
/// SMART status
|
||||
pub enum SmartStatus {
|
||||
/// Smart tests passed - everything is OK
|
||||
Passed,
|
||||
/// Smart tests failed - disk has problems
|
||||
Failed,
|
||||
/// Unknown status
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
/// SMART Attribute
|
||||
pub struct SmartAttribute {
|
||||
/// Attribute name
|
||||
name: String,
|
||||
/// Attribute raw value
|
||||
value: String,
|
||||
// the rest of the values is available for ATA type
|
||||
/// ATA Attribute ID
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
id: Option<u64>,
|
||||
/// ATA Flags
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
flags: Option<String>,
|
||||
/// ATA normalized value (0..100)
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
normalized: Option<f64>,
|
||||
/// ATA worst
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
worst: Option<f64>,
|
||||
/// ATA threshold
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
threshold: Option<f64>,
|
||||
}
|
||||
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
status: {
|
||||
type: SmartStatus,
|
||||
},
|
||||
wearout: {
|
||||
description: "Wearout level.",
|
||||
type: f64,
|
||||
optional: true,
|
||||
},
|
||||
attributes: {
|
||||
description: "SMART attributes.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: SmartAttribute,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
/// Data from smartctl
|
||||
pub struct SmartData {
|
||||
pub status: SmartStatus,
|
||||
pub wearout: Option<f64>,
|
||||
pub attributes: Vec<SmartAttribute>,
|
||||
}
|
||||
|
||||
/// Read smartctl data for a disk (/dev/XXX).
|
||||
pub fn get_smart_data(
|
||||
disk: &super::Disk,
|
||||
health_only: bool,
|
||||
) -> Result<SmartData, Error> {
|
||||
|
||||
const SMARTCTL_BIN_PATH: &str = "smartctl";
|
||||
|
||||
let mut command = std::process::Command::new(SMARTCTL_BIN_PATH);
|
||||
command.arg("-H");
|
||||
if !health_only { command.args(&["-A", "-j"]); }
|
||||
|
||||
let disk_path = match disk.device_path() {
|
||||
Some(path) => path,
|
||||
None => bail!("disk {:?} has no node in /dev", disk.syspath()),
|
||||
};
|
||||
command.arg(disk_path);
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
let output: serde_json::Value = output.parse()?;
|
||||
|
||||
let mut wearout = None;
|
||||
|
||||
let mut attributes = Vec::new();
|
||||
|
||||
// ATA devices
|
||||
if let Some(list) = output["ata_smart_attributes"]["table"].as_array() {
|
||||
let wearout_id = lookup_vendor_wearout_id(disk);
|
||||
for item in list {
|
||||
let id = match item["id"].as_u64() {
|
||||
Some(id) => id,
|
||||
None => continue, // skip attributes without id
|
||||
};
|
||||
|
||||
let name = match item["name"].as_str() {
|
||||
Some(name) => name.to_string(),
|
||||
None => continue, // skip attributes without name
|
||||
};
|
||||
|
||||
let raw_value = match item["raw"]["string"].as_str() {
|
||||
Some(value) => value.to_string(),
|
||||
None => continue, // skip attributes without raw value
|
||||
};
|
||||
|
||||
let flags = match item["flags"]["string"].as_str() {
|
||||
Some(flags) => flags.to_string(),
|
||||
None => continue, // skip attributes without flags
|
||||
};
|
||||
|
||||
let normalized = match item["value"].as_f64() {
|
||||
Some(v) => v,
|
||||
None => continue, // skip attributes without normalized value
|
||||
};
|
||||
|
||||
let worst = match item["worst"].as_f64() {
|
||||
Some(v) => v,
|
||||
None => continue, // skip attributes without worst entry
|
||||
};
|
||||
|
||||
let threshold = match item["thresh"].as_f64() {
|
||||
Some(v) => v,
|
||||
None => continue, // skip attributes without threshold entry
|
||||
};
|
||||
|
||||
if id == wearout_id {
|
||||
wearout = Some(normalized);
|
||||
}
|
||||
|
||||
attributes.push(SmartAttribute {
|
||||
name,
|
||||
value: raw_value,
|
||||
id: Some(id),
|
||||
flags: Some(flags),
|
||||
normalized: Some(normalized),
|
||||
worst: Some(worst),
|
||||
threshold: Some(threshold),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// NVME devices
|
||||
if let Some(list) = output["nvme_smart_health_information_log"].as_object() {
|
||||
for (name, value) in list {
|
||||
if name == "percentage_used" {
|
||||
// extract wearout from nvme text, allow for decimal values
|
||||
if let Some(v) = value.as_f64() {
|
||||
if v <= 100.0 {
|
||||
wearout = Some(100.0 - v);
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(value) = value.as_f64() {
|
||||
attributes.push(SmartAttribute {
|
||||
name: name.to_string(),
|
||||
value: value.to_string(),
|
||||
id: None,
|
||||
flags: None,
|
||||
normalized: None,
|
||||
worst: None,
|
||||
threshold: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let status = match output["smart_status"]["passed"].as_bool() {
|
||||
None => SmartStatus::Unknown,
|
||||
Some(true) => SmartStatus::Passed,
|
||||
Some(false) => SmartStatus::Failed,
|
||||
};
|
||||
|
||||
|
||||
Ok(SmartData { status, wearout, attributes })
|
||||
}
|
||||
|
||||
fn lookup_vendor_wearout_id(disk: &super::Disk) -> u64 {
|
||||
|
||||
static VENDOR_MAP: &[(&str, u64)] = &[
|
||||
("kingston", 231),
|
||||
("samsung", 177),
|
||||
("intel", 233),
|
||||
("sandisk", 233),
|
||||
("crucial", 202),
|
||||
];
|
||||
|
||||
let result = 233; // default
|
||||
let model = match disk.model() {
|
||||
Some(model) => model.to_string_lossy().to_lowercase(),
|
||||
None => return result,
|
||||
};
|
||||
|
||||
for (vendor, attr_id) in VENDOR_MAP {
|
||||
if model.contains(vendor) {
|
||||
return *attr_id;
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
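// Illustrative only: a minimal sketch of querying SMART health for a disk, assuming
// the caller already has a super::Disk handle; is_disk_healthy is made up. The
// `false` mirrors the get_disks() call site, which requests the full attribute table.
pub fn is_disk_healthy(disk: &super::Disk) -> Result<bool, Error> {
    let smart = get_smart_data(disk, false)?;
    Ok(match smart.status {
        SmartStatus::Passed => true,
        _ => false,
    })
}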
|
@ -1,18 +1,10 @@
|
||||
use std::path::PathBuf;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use nom::{
|
||||
error::VerboseError,
|
||||
bytes::complete::{take_while, take_while1, take_till, take_till1},
|
||||
combinator::{map_res, all_consuming, recognize},
|
||||
sequence::{preceded, tuple},
|
||||
character::complete::{space1, digit1, char, line_ending},
|
||||
multi::{many0, many1},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
lazy_static!{
|
||||
@ -24,22 +16,6 @@ lazy_static!{
|
||||
};
|
||||
}
|
||||
|
||||
type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ZFSPoolUsage {
|
||||
total: u64,
|
||||
used: u64,
|
||||
free: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ZFSPoolStatus {
|
||||
name: String,
|
||||
usage: Option<ZFSPoolUsage>,
|
||||
devices: Vec<String>,
|
||||
}
|
||||
|
||||
/// Returns kernel IO-stats for zfs pools
|
||||
pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
|
||||
|
||||
@ -80,117 +56,22 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
|
||||
Ok(Some(stat))
|
||||
}
|
||||
|
||||
/// Recognizes zero or more spaces and tabs (but not carriage returns or line feeds)
|
||||
fn multispace0(i: &str) -> IResult<&str, &str> {
|
||||
take_while(|c| c == ' ' || c == '\t')(i)
|
||||
}
|
||||
|
||||
/// Recognizes one or more spaces and tabs (but not carriage returns or line feeds)
|
||||
fn multispace1(i: &str) -> IResult<&str, &str> {
|
||||
take_while1(|c| c == ' ' || c == '\t')(i)
|
||||
}
|
||||
|
||||
fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
|
||||
if i.starts_with('-') {
|
||||
Ok((&i[1..], None))
|
||||
} else {
|
||||
let (i, value) = map_res(recognize(digit1), str::parse)(i)?;
|
||||
Ok((i, Some(value)))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_pool_device(i: &str) -> IResult<&str, String> {
|
||||
let (i, (device, _, _rest)) = tuple((
|
||||
preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
|
||||
multispace1,
|
||||
preceded(take_till(|c| c == '\n'), char('\n')),
|
||||
))(i)?;
|
||||
|
||||
Ok((i, device.to_string()))
|
||||
}
|
||||
|
||||
fn parse_pool_header(i: &str) -> IResult<&str, ZFSPoolStatus> {
|
||||
let (i, (text, total, used, free, _, _eol)) = tuple((
|
||||
take_while1(|c| char::is_alphanumeric(c)),
|
||||
preceded(multispace1, parse_optional_u64),
|
||||
preceded(multispace1, parse_optional_u64),
|
||||
preceded(multispace1, parse_optional_u64),
|
||||
preceded(space1, take_till(|c| c == '\n')),
|
||||
line_ending,
|
||||
))(i)?;
|
||||
|
||||
let status = if let (Some(total), Some(used), Some(free)) = (total, used, free) {
|
||||
ZFSPoolStatus {
|
||||
name: text.into(),
|
||||
usage: Some(ZFSPoolUsage { total, used, free }),
|
||||
devices: Vec::new(),
|
||||
}
|
||||
} else {
|
||||
ZFSPoolStatus {
|
||||
name: text.into(), usage: None, devices: Vec::new(),
|
||||
}
|
||||
};
|
||||
|
||||
Ok((i, status))
|
||||
}
|
||||
|
||||
fn parse_pool_status(i: &str) -> IResult<&str, ZFSPoolStatus> {
|
||||
|
||||
let (i, mut stat) = parse_pool_header(i)?;
|
||||
let (i, devices) = many1(parse_pool_device)(i)?;
|
||||
|
||||
for device_path in devices.into_iter().filter(|n| n.starts_with("/dev/")) {
|
||||
stat.devices.push(device_path);
|
||||
}
|
||||
|
||||
let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?; // skip empty lines
|
||||
|
||||
Ok((i, stat))
|
||||
}
|
||||
|
||||
/// Parse zpool list output
|
||||
/// Get set of devices used by zfs (or a specific zfs pool)
|
||||
///
|
||||
/// Note: This does not reveal any details on how the pool uses the devices, because
|
||||
/// the zpool list output format is not really defined...
|
||||
pub fn parse_zfs_list(i: &str) -> Result<Vec<ZFSPoolStatus>, Error> {
|
||||
match all_consuming(many1(parse_pool_status))(i) {
|
||||
Err(nom::Err::Error(err)) |
|
||||
Err(nom::Err::Failure(err)) => {
|
||||
bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
|
||||
}
|
||||
Err(err) => {
|
||||
bail!("unable to parse calendar event: {}", err);
|
||||
}
|
||||
Ok((_, ce)) => Ok(ce),
|
||||
}
|
||||
}
|
||||
|
||||
/// List devices used by zfs (or a specific zfs pool)
|
||||
/// The set is indexed by using the unix raw device number (dev_t is u64)
|
||||
pub fn zfs_devices(
|
||||
partition_type_map: &HashMap<String, Vec<String>>,
|
||||
pool: Option<&OsStr>,
|
||||
) -> Result<HashSet<String>, Error> {
|
||||
pool: Option<String>,
|
||||
) -> Result<HashSet<u64>, Error> {
|
||||
|
||||
// Note: zpool list output can include entries for 'special', 'cache' and 'logs'
|
||||
// and maybe other things.
|
||||
|
||||
let mut command = std::process::Command::new("/sbin/zpool");
|
||||
command.args(&["list", "-H", "-v", "-p", "-P"]);
|
||||
|
||||
if let Some(pool) = pool { command.arg(pool); }
|
||||
|
||||
let output = command.output()
|
||||
.map_err(|err| format_err!("failed to execute '/sbin/zpool' - {}", err))?;
|
||||
|
||||
let output = crate::tools::command_output(output, None)
|
||||
.map_err(|err| format_err!("zpool list command failed: {}", err))?;
|
||||
|
||||
let list = parse_zfs_list(&output)?;
|
||||
let list = zpool_list(pool, true)?;
|
||||
|
||||
let mut device_set = HashSet::new();
|
||||
for entry in list {
|
||||
for device in entry.devices {
|
||||
device_set.insert(device.clone());
|
||||
let meta = std::fs::metadata(device)?;
|
||||
device_set.insert(meta.rdev());
|
||||
}
|
||||
}
|
||||
|
||||
@ -198,9 +79,11 @@ pub fn zfs_devices(
|
||||
.filter_map(|(uuid, list)| if ZFS_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
|
||||
{
|
||||
for device in device_list {
|
||||
device_set.insert(device.clone());
|
||||
let meta = std::fs::metadata(device)?;
|
||||
device_set.insert(meta.rdev());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(device_set)
|
||||
}
|
||||
|
||||
|
265
src/tools/disks/zpool_list.rs
Normal file
@ -0,0 +1,265 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use crate::tools::nom::{
|
||||
multispace0, multispace1, notspace1, IResult,
|
||||
};
|
||||
|
||||
use nom::{
|
||||
bytes::complete::{take_while1, take_till, take_till1},
|
||||
combinator::{map_res, all_consuming, recognize, opt},
|
||||
sequence::{preceded, tuple},
|
||||
character::complete::{digit1, char, line_ending},
|
||||
multi::{many0},
|
||||
};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ZFSPoolUsage {
|
||||
pub size: u64,
|
||||
pub alloc: u64,
|
||||
pub free: u64,
|
||||
pub dedup: f64,
|
||||
pub frag: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct ZFSPoolInfo {
|
||||
pub name: String,
|
||||
pub health: String,
|
||||
pub usage: Option<ZFSPoolUsage>,
|
||||
pub devices: Vec<String>,
|
||||
}
|
||||
|
||||
|
||||
fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
|
||||
if i.starts_with('-') {
|
||||
Ok((&i[1..], None))
|
||||
} else {
|
||||
let (i, value) = map_res(recognize(digit1), str::parse)(i)?;
|
||||
Ok((i, Some(value)))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_optional_f64(i: &str) -> IResult<&str, Option<f64>> {
|
||||
if i.starts_with('-') {
|
||||
Ok((&i[1..], None))
|
||||
} else {
|
||||
let (i, value) = nom::number::complete::double(i)?;
|
||||
Ok((i, Some(value)))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_pool_device(i: &str) -> IResult<&str, String> {
|
||||
let (i, (device, _, _rest)) = tuple((
|
||||
preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
|
||||
multispace1,
|
||||
preceded(take_till(|c| c == '\n'), char('\n')),
|
||||
))(i)?;
|
||||
|
||||
Ok((i, device.to_string()))
|
||||
}
|
||||
|
||||
fn parse_zpool_list_header(i: &str) -> IResult<&str, ZFSPoolInfo> {
|
||||
// name, size, allocated, free, checkpoint, expandsize, fragmentation, capacity, dedupratio, health, altroot.
|
||||
|
||||
let (i, (text, size, alloc, free, _, _,
|
||||
frag, _, dedup, health,
|
||||
_altroot, _eol)) = tuple((
|
||||
take_while1(|c| char::is_alphanumeric(c)), // name
|
||||
preceded(multispace1, parse_optional_u64), // size
|
||||
preceded(multispace1, parse_optional_u64), // allocated
|
||||
preceded(multispace1, parse_optional_u64), // free
|
||||
preceded(multispace1, notspace1), // checkpoint
|
||||
preceded(multispace1, notspace1), // expandsize
|
||||
preceded(multispace1, parse_optional_u64), // fragmentation
|
||||
preceded(multispace1, notspace1), // capacity
|
||||
preceded(multispace1, parse_optional_f64), // dedup
|
||||
preceded(multispace1, notspace1), // health
|
||||
opt(preceded(multispace1, notspace1)), // optional altroot
|
||||
line_ending,
|
||||
))(i)?;
|
||||
|
||||
let status = if let (Some(size), Some(alloc), Some(free), Some(frag), Some(dedup)) = (size, alloc, free, frag, dedup) {
|
||||
ZFSPoolInfo {
|
||||
name: text.into(),
|
||||
health: health.into(),
|
||||
usage: Some(ZFSPoolUsage { size, alloc, free, frag, dedup }),
|
||||
devices: Vec::new(),
|
||||
}
|
||||
} else {
|
||||
ZFSPoolInfo {
|
||||
name: text.into(),
|
||||
health: health.into(),
|
||||
usage: None,
|
||||
devices: Vec::new(),
|
||||
}
|
||||
};
|
||||
|
||||
Ok((i, status))
|
||||
}
|
||||
|
||||
fn parse_zpool_list_item(i: &str) -> IResult<&str, ZFSPoolInfo> {
|
||||
|
||||
let (i, mut stat) = parse_zpool_list_header(i)?;
|
||||
let (i, devices) = many0(parse_pool_device)(i)?;
|
||||
|
||||
for device_path in devices.into_iter().filter(|n| n.starts_with("/dev/")) {
|
||||
stat.devices.push(device_path);
|
||||
}
|
||||
|
||||
let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?; // skip empty lines
|
||||
|
||||
Ok((i, stat))
|
||||
}
|
||||
|
||||
/// Parse zpool list output
|
||||
///
|
||||
/// Note: This does not reveal any details on how the pool uses the devices, because
|
||||
/// the zpool list output format is not really defined...
|
||||
fn parse_zpool_list(i: &str) -> Result<Vec<ZFSPoolInfo>, Error> {
|
||||
match all_consuming(many0(parse_zpool_list_item))(i) {
|
||||
Err(nom::Err::Error(err)) |
|
||||
Err(nom::Err::Failure(err)) => {
|
||||
bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
|
||||
}
|
||||
Err(err) => {
|
||||
bail!("unable to parse zfs list output - {}", err);
|
||||
}
|
||||
Ok((_, ce)) => Ok(ce),
|
||||
}
|
||||
}
|
||||
|
||||
/// Run zpool list and return parsed output
|
||||
///
|
||||
/// Devices are only included when run with verbose flags
|
||||
/// set. Without, device lists are empty.
|
||||
pub fn zpool_list(pool: Option<String>, verbose: bool) -> Result<Vec<ZFSPoolInfo>, Error> {
|
||||
|
||||
// Note: zpool list verbose output can include entries for 'special', 'cache' and 'logs'
|
||||
// and maybe other things.
|
||||
|
||||
let mut command = std::process::Command::new("zpool");
|
||||
command.args(&["list", "-H", "-p", "-P"]);
|
||||
|
||||
// Note: We do not use -o to define output properties, because zpool command ignores
|
||||
// that completely for special vdevs and devices
|
||||
|
||||
if verbose { command.arg("-v"); }
|
||||
|
||||
if let Some(pool) = pool { command.arg(pool); }
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
parse_zpool_list(&output)
|
||||
}
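// Illustrative only: a hedged sketch of listing all pools with verbose output so the
// device lists get filled in; print_pools is made up, the fields come from
// ZFSPoolInfo/ZFSPoolUsage above.
fn print_pools() -> Result<(), Error> {
    for pool in zpool_list(None, true)? {
        if let Some(usage) = &pool.usage {
            println!("{} ({}): {}/{} bytes allocated", pool.name, pool.health, usage.alloc, usage.size);
        }
        for device in &pool.devices {
            println!("  device {}", device);
        }
    }
    Ok(())
}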
|
||||
|
||||
#[test]
|
||||
fn test_zfs_parse_list() -> Result<(), Error> {
|
||||
|
||||
let output = "";
|
||||
|
||||
let data = parse_zpool_list(&output)?;
|
||||
let expect = Vec::new();
|
||||
|
||||
assert_eq!(data, expect);
|
||||
|
||||
let output = "btest 427349245952 405504 427348840448 - - 0 0 1.00 ONLINE -\n";
|
||||
let data = parse_zpool_list(&output)?;
|
||||
let expect = vec![
|
||||
ZFSPoolInfo {
|
||||
name: "btest".to_string(),
|
||||
health: "ONLINE".to_string(),
|
||||
devices: Vec::new(),
|
||||
usage: Some(ZFSPoolUsage {
|
||||
size: 427349245952,
|
||||
alloc: 405504,
|
||||
free: 427348840448,
|
||||
dedup: 1.0,
|
||||
frag: 0,
|
||||
}),
|
||||
}];
|
||||
|
||||
assert_eq!(data, expect);
|
||||
|
||||
let output = "\
|
||||
rpool 535260299264 402852388864 132407910400 - - 22 75 1.00 ONLINE -
|
||||
/dev/disk/by-id/ata-Crucial_CT500MX200SSD1_154210EB4078-part3 498216206336 392175546368 106040659968 - - 22 78 - ONLINE
|
||||
special - - - - - - - - -
|
||||
/dev/sda2 37044092928 10676842496 26367250432 - - 63 28 - ONLINE
|
||||
logs - - - - - - - - -
|
||||
/dev/sda3 4831838208 1445888 4830392320 - - 0 0 - ONLINE
|
||||
|
||||
";
|
||||
|
||||
let data = parse_zpool_list(&output)?;
|
||||
let expect = vec![
|
||||
ZFSPoolInfo {
|
||||
name: String::from("rpool"),
|
||||
health: String::from("ONLINE"),
|
||||
devices: vec![String::from("/dev/disk/by-id/ata-Crucial_CT500MX200SSD1_154210EB4078-part3")],
|
||||
usage: Some(ZFSPoolUsage {
|
||||
size: 535260299264,
|
||||
alloc: 402852388864,
|
||||
free: 132407910400,
|
||||
dedup: 1.0,
|
||||
frag: 22,
|
||||
}),
|
||||
},
|
||||
ZFSPoolInfo {
|
||||
name: String::from("special"),
|
||||
health: String::from("-"),
|
||||
devices: vec![String::from("/dev/sda2")],
|
||||
usage: None,
|
||||
},
|
||||
ZFSPoolInfo {
|
||||
name: String::from("logs"),
|
||||
health: String::from("-"),
|
||||
devices: vec![String::from("/dev/sda3")],
|
||||
usage: None,
|
||||
},
|
||||
];
|
||||
|
||||
assert_eq!(data, expect);
|
||||
|
||||
let output = "\
|
||||
btest 427349245952 761856 427348484096 - - 0 0 1.00 ONLINE -
|
||||
mirror 213674622976 438272 213674184704 - - 0 0 - ONLINE
|
||||
/dev/sda1 - - - - - - - - ONLINE
|
||||
/dev/sda2 - - - - - - - - ONLINE
|
||||
mirror 213674622976 323584 213674299392 - - 0 0 - ONLINE
|
||||
/dev/sda3 - - - - - - - - ONLINE
|
||||
/dev/sda4 - - - - - - - - ONLINE
|
||||
logs - - - - - - - - -
|
||||
/dev/sda5 213674622976 0 213674622976 - - 0 0 - ONLINE
|
||||
";
|
||||
|
||||
let data = parse_zpool_list(&output)?;
|
||||
let expect = vec![
|
||||
ZFSPoolInfo {
|
||||
name: String::from("btest"),
|
||||
health: String::from("ONLINE"),
|
||||
usage: Some(ZFSPoolUsage {
|
||||
size: 427349245952,
|
||||
alloc: 761856,
|
||||
free: 427348484096,
|
||||
dedup: 1.0,
|
||||
frag: 0,
|
||||
}),
|
||||
devices: vec![
|
||||
String::from("/dev/sda1"),
|
||||
String::from("/dev/sda2"),
|
||||
String::from("/dev/sda3"),
|
||||
String::from("/dev/sda4"),
|
||||
]
|
||||
},
|
||||
ZFSPoolInfo {
|
||||
name: String::from("logs"),
|
||||
health: String::from("-"),
|
||||
usage: None,
|
||||
devices: vec![String::from("/dev/sda5")],
|
||||
},
|
||||
];
|
||||
|
||||
assert_eq!(data, expect);
|
||||
|
||||
Ok(())
|
||||
}
|
432
src/tools/disks/zpool_status.rs
Normal file
@ -0,0 +1,432 @@
|
||||
use std::mem::replace;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
use crate::tools::nom::{
|
||||
parse_complete, parse_error, parse_failure,
|
||||
multispace0, multispace1, notspace1, parse_u64, IResult,
|
||||
};
|
||||
|
||||
use nom::{
|
||||
bytes::complete::{tag, take_while, take_while1},
|
||||
combinator::{opt},
|
||||
sequence::{preceded},
|
||||
character::complete::{line_ending},
|
||||
multi::{many0,many1},
|
||||
};
|
||||
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ZFSPoolVDevState {
|
||||
pub name: String,
|
||||
pub lvl: u64,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub read: Option<u64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub write: Option<u64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub cksum: Option<u64>,
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub msg: Option<String>,
|
||||
}
|
||||
|
||||
fn expand_tab_length(input: &str) -> usize {
|
||||
input.chars().map(|c| if c == '\t' { 8 } else { 1 }).sum()
|
||||
}
|
||||
|
||||
fn parse_zpool_status_vdev(i: &str) -> IResult<&str, ZFSPoolVDevState> {
|
||||
|
||||
let (n, indent) = multispace0(i)?;
|
||||
|
||||
let indent_len = expand_tab_length(indent);
|
||||
|
||||
if (indent_len & 1) != 0 {
|
||||
return Err(parse_failure(n, "wrong indent length"));
|
||||
}
|
||||
let i = n;
|
||||
|
||||
let indent_level = (indent_len as u64)/2;
|
||||
|
||||
let (i, vdev_name) = notspace1(i)?;
|
||||
|
||||
if let Ok((n, _)) = preceded(multispace0, line_ending)(i) { // special device
|
||||
let vdev = ZFSPoolVDevState {
|
||||
name: vdev_name.to_string(),
|
||||
lvl: indent_level,
|
||||
state: None,
|
||||
read: None,
|
||||
write: None,
|
||||
cksum: None,
|
||||
msg: None,
|
||||
};
|
||||
return Ok((n, vdev));
|
||||
}
|
||||
|
||||
let (i, state) = preceded(multispace1, notspace1)(i)?;
|
||||
let (i, read) = preceded(multispace1, parse_u64)(i)?;
|
||||
let (i, write) = preceded(multispace1, parse_u64)(i)?;
|
||||
let (i, cksum) = preceded(multispace1, parse_u64)(i)?;
|
||||
let (i, msg) = opt(preceded(multispace1, take_while(|c| c != '\n')))(i)?;
|
||||
let (i, _) = line_ending(i)?;
|
||||
|
||||
let vdev = ZFSPoolVDevState {
|
||||
name: vdev_name.to_string(),
|
||||
lvl: indent_level,
|
||||
state: Some(state.to_string()),
|
||||
read: Some(read),
|
||||
write: Some(write),
|
||||
cksum: Some(cksum),
|
||||
msg: msg.map(String::from),
|
||||
};
|
||||
|
||||
Ok((i, vdev))
|
||||
}
|
||||
|
||||
fn parse_zpool_status_tree(i: &str) -> IResult<&str, Vec<ZFSPoolVDevState>> {
|
||||
|
||||
// skip header
|
||||
let (i, _) = tag("NAME")(i)?;
|
||||
let (i, _) = multispace1(i)?;
|
||||
let (i, _) = tag("STATE")(i)?;
|
||||
let (i, _) = multispace1(i)?;
|
||||
let (i, _) = tag("READ")(i)?;
|
||||
let (i, _) = multispace1(i)?;
|
||||
let (i, _) = tag("WRITE")(i)?;
|
||||
let (i, _) = multispace1(i)?;
|
||||
let (i, _) = tag("CKSUM")(i)?;
|
||||
let (i, _) = line_ending(i)?;
|
||||
|
||||
// parse vdev list
|
||||
many1(parse_zpool_status_vdev)(i)
|
||||
}
|
||||
|
||||
fn space_indented_line(indent: usize) -> impl Fn(&str) -> IResult<&str, &str> {
|
||||
move |i| {
|
||||
let mut len = 0;
|
||||
let mut n = i;
|
||||
loop {
|
||||
if n.starts_with('\t') {
|
||||
len += 8;
|
||||
n = &n[1..];
|
||||
} else if n.starts_with(' ') {
|
||||
len += 1;
|
||||
n = &n[1..];
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
if len >= indent { break; }
|
||||
};
|
||||
if len != indent {
|
||||
return Err(parse_error(i, "not correctly indented"));
|
||||
}
|
||||
|
||||
take_while1(|c| c != '\n')(n)
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_zpool_status_field(i: &str) -> IResult<&str, (String, String)> {
|
||||
let (i, prefix) = take_while1(|c| c != ':')(i)?;
|
||||
let (i, _) = tag(":")(i)?;
|
||||
let (i, mut value) = take_while(|c| c != '\n')(i)?;
|
||||
if value.starts_with(' ') { value = &value[1..]; }
|
||||
|
||||
let (mut i, _) = line_ending(i)?;
|
||||
|
||||
let field = prefix.trim().to_string();
|
||||
|
||||
let prefix_len = expand_tab_length(prefix);
|
||||
|
||||
let indent: usize = prefix_len + 2;
|
||||
|
||||
let parse_continuation = opt(space_indented_line(indent));
|
||||
|
||||
let mut value = value.to_string();
|
||||
|
||||
if field == "config" {
|
||||
let (n, _) = line_ending(i)?;
|
||||
i = n;
|
||||
}
|
||||
|
||||
loop {
|
||||
let (n, cont) = parse_continuation(i)?;
|
||||
|
||||
if let Some(cont) = cont {
|
||||
let (n, _) = line_ending(n)?;
|
||||
i = n;
|
||||
if !value.is_empty() { value.push('\n'); }
|
||||
value.push_str(cont);
|
||||
} else {
|
||||
if field == "config" {
|
||||
let (n, _) = line_ending(i)?;
|
||||
value.push('\n');
|
||||
i = n;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok((i, (field, value)))
|
||||
}
|
||||
|
||||
pub fn parse_zpool_status_config_tree(i: &str) -> Result<Vec<ZFSPoolVDevState>, Error> {
|
||||
parse_complete("zfs status config tree", i, parse_zpool_status_tree)
|
||||
}
|
||||
|
||||
fn parse_zpool_status(input: &str) -> Result<Vec<(String, String)>, Error> {
|
||||
parse_complete("zfs status output", &input, many0(parse_zpool_status_field))
|
||||
}
|
||||
|
||||
pub fn vdev_list_to_tree(vdev_list: &[ZFSPoolVDevState]) -> Result<Value, Error> {
|
||||
indented_list_to_tree(vdev_list, |vdev| {
|
||||
let node = serde_json::to_value(vdev).unwrap();
|
||||
(node, vdev.lvl)
|
||||
})
|
||||
}
|
||||
|
||||
fn indented_list_to_tree<'a, T, F, I>(items: I, to_node: F) -> Result<Value, Error>
|
||||
where
|
||||
T: 'a,
|
||||
I: IntoIterator<Item = &'a T>,
|
||||
F: Fn(&T) -> (Value, u64),
|
||||
{
|
||||
struct StackItem {
|
||||
node: Map<String, Value>,
|
||||
level: u64,
|
||||
children_of_parent: Vec<Value>,
|
||||
}
|
||||
|
||||
let mut stack = Vec::<StackItem>::new();
|
||||
// hold current node and the children of the current parent (as that's where we insert)
|
||||
let mut cur = StackItem {
|
||||
node: Map::<String, Value>::new(),
|
||||
level: 0,
|
||||
children_of_parent: Vec::new(),
|
||||
};
|
||||
|
||||
for item in items {
|
||||
let (node, node_level) = to_node(&item);
|
||||
let vdev_level = 1 + node_level;
|
||||
let mut node = match node {
|
||||
Value::Object(map) => map,
|
||||
_ => bail!("to_node returned wrong type"),
|
||||
};
|
||||
|
||||
node.insert("leaf".to_string(), Value::Bool(true));
|
||||
|
||||
// if required, go back up (possibly multiple levels):
|
||||
while vdev_level < cur.level {
|
||||
cur.children_of_parent.push(Value::Object(cur.node));
|
||||
let mut parent = stack.pop().unwrap();
|
||||
parent.node.insert("children".to_string(), Value::Array(cur.children_of_parent));
|
||||
parent.node.insert("leaf".to_string(), Value::Bool(false));
|
||||
cur = parent;
|
||||
|
||||
if vdev_level > cur.level {
|
||||
// when we encounter mismatching levels like "0, 2, 1" instead of "0, 1, 2, 1"
|
||||
bail!("broken indentation between levels");
|
||||
}
|
||||
}
|
||||
|
||||
if vdev_level > cur.level {
|
||||
// indented further, push our current state and start a new "map"
|
||||
stack.push(StackItem {
|
||||
node: replace(&mut cur.node, node),
|
||||
level: replace(&mut cur.level, vdev_level),
|
||||
children_of_parent: replace(&mut cur.children_of_parent, Vec::new()),
|
||||
});
|
||||
} else {
|
||||
// same indentation level, add to children of the previous level:
|
||||
cur.children_of_parent.push(Value::Object(
|
||||
replace(&mut cur.node, node),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
while !stack.is_empty() {
|
||||
cur.children_of_parent.push(Value::Object(cur.node));
|
||||
let mut parent = stack.pop().unwrap();
|
||||
parent.node.insert("children".to_string(), Value::Array(cur.children_of_parent));
|
||||
parent.node.insert("leaf".to_string(), Value::Bool(false));
|
||||
cur = parent;
|
||||
}
|
||||
|
||||
Ok(Value::Object(cur.node))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vdev_list_to_tree() {
|
||||
const DEFAULT: ZFSPoolVDevState = ZFSPoolVDevState {
|
||||
name: String::new(),
|
||||
lvl: 0,
|
||||
state: None,
|
||||
read: None,
|
||||
write: None,
|
||||
cksum: None,
|
||||
msg: None,
|
||||
};
|
||||
|
||||
let input = vec![
|
||||
//ZFSPoolVDevState { name: "root".to_string(), lvl: 0, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev1".to_string(), lvl: 1, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev1-disk1".to_string(), lvl: 2, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev1-disk2".to_string(), lvl: 2, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev2".to_string(), lvl: 1, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev2-g1".to_string(), lvl: 2, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev2-g1-d1".to_string(), lvl: 3, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev2-g1-d2".to_string(), lvl: 3, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev2-g2".to_string(), lvl: 2, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev3".to_string(), lvl: 1, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev4".to_string(), lvl: 1, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev4-g1".to_string(), lvl: 2, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev4-g1-d1".to_string(), lvl: 3, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev4-g1-d1-x1".to_string(), lvl: 4, ..DEFAULT },
|
||||
ZFSPoolVDevState { name: "vdev4-g2".to_string(), lvl: 2, ..DEFAULT }, // up by 2
|
||||
];
|
||||
|
||||
const EXPECTED: &str = "{\
|
||||
\"children\":[{\
|
||||
\"children\":[{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":2,\"name\":\"vdev1-disk1\"\
|
||||
},{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":2,\"name\":\"vdev1-disk2\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":1,\"name\":\"vdev1\"\
|
||||
},{\
|
||||
\"children\":[{\
|
||||
\"children\":[{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":3,\"name\":\"vdev2-g1-d1\"\
|
||||
},{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":3,\"name\":\"vdev2-g1-d2\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":2,\"name\":\"vdev2-g1\"\
|
||||
},{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":2,\"name\":\"vdev2-g2\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":1,\"name\":\"vdev2\"\
|
||||
},{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":1,\"name\":\"vdev3\"\
|
||||
},{\
|
||||
\"children\":[{\
|
||||
\"children\":[{\
|
||||
\"children\":[{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":4,\"name\":\"vdev4-g1-d1-x1\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":3,\"name\":\"vdev4-g1-d1\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":2,\"name\":\"vdev4-g1\"\
|
||||
},{\
|
||||
\"leaf\":true,\
|
||||
\"lvl\":2,\"name\":\"vdev4-g2\"\
|
||||
}],\
|
||||
\"leaf\":false,\
|
||||
\"lvl\":1,\"name\":\"vdev4\"\
|
||||
}],\
|
||||
\"leaf\":false\
|
||||
}";
|
||||
let expected: Value = serde_json::from_str(EXPECTED)
|
||||
.expect("failed to parse expected json value");
|
||||
|
||||
let tree = vdev_list_to_tree(&input)
|
||||
.expect("failed to turn valid vdev list into a tree");
|
||||
assert_eq!(tree, expected);
|
||||
}
|
||||
|
||||
pub fn zpool_status(pool: &str) -> Result<Vec<(String, String)>, Error> {
|
||||
|
||||
let mut command = std::process::Command::new("zpool");
|
||||
command.args(&["status", "-p", "-P", pool]);
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
|
||||
parse_zpool_status(&output)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zpool_status_parser() -> Result<(), Error> {
|
||||
|
||||
let output = r###" pool: tank
|
||||
state: DEGRADED
|
||||
status: One or more devices could not be opened. Sufficient replicas exist for
|
||||
the pool to continue functioning in a degraded state.
|
||||
action: Attach the missing device and online it using 'zpool online'.
|
||||
see: http://www.sun.com/msg/ZFS-8000-2Q
|
||||
scrub: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
tank DEGRADED 0 0 0
|
||||
mirror-0 DEGRADED 0 0 0
|
||||
c1t0d0 ONLINE 0 0 0
|
||||
c1t2d0 ONLINE 0 0 0
|
||||
c1t1d0 UNAVAIL 0 0 0 cannot open
|
||||
mirror-1 DEGRADED 0 0 0
|
||||
tank1 DEGRADED 0 0 0
|
||||
tank2 DEGRADED 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
"###;
|
||||
|
||||
let key_value_list = parse_zpool_status(&output)?;
|
||||
for (k, v) in key_value_list {
|
||||
println!("{} => {}", k,v);
|
||||
if k == "config" {
|
||||
let vdev_list = parse_zpool_status_config_tree(&v)?;
|
||||
let _tree = vdev_list_to_tree(&vdev_list);
|
||||
//println!("TREE1 {}", serde_json::to_string_pretty(&tree)?);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zpool_status_parser2() -> Result<(), Error> {
|
||||
|
||||
// Note: this input contains TABS
|
||||
let output = r###" pool: btest
|
||||
state: ONLINE
|
||||
scan: none requested
|
||||
config:
|
||||
|
||||
NAME STATE READ WRITE CKSUM
|
||||
btest ONLINE 0 0 0
|
||||
mirror-0 ONLINE 0 0 0
|
||||
/dev/sda1 ONLINE 0 0 0
|
||||
/dev/sda2 ONLINE 0 0 0
|
||||
mirror-1 ONLINE 0 0 0
|
||||
/dev/sda3 ONLINE 0 0 0
|
||||
/dev/sda4 ONLINE 0 0 0
|
||||
logs
|
||||
/dev/sda5 ONLINE 0 0 0
|
||||
|
||||
errors: No known data errors
|
||||
"###;
|
||||
|
||||
let key_value_list = parse_zpool_status(&output)?;
|
||||
for (k, v) in key_value_list {
|
||||
println!("{} => {}", k,v);
|
||||
if k == "config" {
|
||||
let vdev_list = parse_zpool_status_config_tree(&v)?;
|
||||
let _tree = vdev_list_to_tree(&vdev_list);
|
||||
//println!("TREE1 {}", serde_json::to_string_pretty(&tree)?);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
@ -100,7 +100,8 @@ pub struct LruCache<K, V> {
|
||||
_marker: PhantomData<Box<CacheNode<K, V>>>,
|
||||
}
|
||||
|
||||
unsafe impl<K, V> Send for LruCache<K, V> {}
|
||||
// trivial: if our contents are Send, the whole cache is Send
|
||||
unsafe impl<K: Send, V: Send> Send for LruCache<K, V> {}
|
||||
|
||||
impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
/// Create LRU cache instance which holds up to `capacity` nodes at once.
|
||||
|
79
src/tools/nom.rs
Normal file
@ -0,0 +1,79 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use nom::{
|
||||
error::{ParseError, VerboseError},
|
||||
bytes::complete::{take_while, take_while1},
|
||||
combinator::{map_res, all_consuming, recognize},
|
||||
character::complete::{digit1},
|
||||
};
|
||||
|
||||
pub type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
|
||||
|
||||
pub fn parse_error<'a>(i: &'a str, context: &'static str) -> nom::Err<VerboseError<&'a str>> {
|
||||
let err = VerboseError { errors: Vec::new() };
|
||||
let err = VerboseError::add_context(i, context, err);
|
||||
nom::Err::Error(err)
|
||||
}
|
||||
|
||||
pub fn parse_failure<'a>(i: &'a str, context: &'static str) -> nom::Err<VerboseError<&'a str>> {
|
||||
let err = VerboseError { errors: Vec::new() };
|
||||
let err = VerboseError::add_context(i, context, err);
|
||||
nom::Err::Failure(err)
|
||||
}
|
||||
|
||||
/// Recognizes zero or more spaces and tabs (but not carriage returns or line feeds)
|
||||
pub fn multispace0(i: &str) -> IResult<&str, &str> {
|
||||
take_while(|c| c == ' ' || c == '\t')(i)
|
||||
}
|
||||
|
||||
/// Recognizes one or more spaces and tabs (but not carriage returns or line feeds)
|
||||
pub fn multispace1(i: &str) -> IResult<&str, &str> {
|
||||
take_while1(|c| c == ' ' || c == '\t')(i)
|
||||
}
|
||||
|
||||
/// Recognizes one or more non-whitespace-characters
|
||||
pub fn notspace1(i: &str) -> IResult<&str, &str> {
|
||||
take_while1(|c| !(c == ' ' || c == '\t' || c == '\n'))(i)
|
||||
}
|
||||
|
||||
/// Parse a 64 bit unsigned integer
|
||||
pub fn parse_u64(i: &str) -> IResult<&str, u64> {
|
||||
map_res(recognize(digit1), str::parse)(i)
|
||||
}
|
||||
|
||||
/// Parse complete input, generate verbose error message with line numbers
|
||||
pub fn parse_complete<'a, F, O>(what: &str, i: &'a str, parser: F) -> Result<O, Error>
|
||||
where F: Fn(&'a str) -> IResult<&'a str, O>,
|
||||
{
|
||||
match all_consuming(parser)(i) {
|
||||
Err(nom::Err::Error(err)) |
|
||||
Err(nom::Err::Failure(err)) => {
|
||||
bail!("unable to parse {} - {}", what, nom::error::convert_error(i, err));
|
||||
}
|
||||
Err(err) => {
|
||||
bail!("unable to parse {} - {}", what, err);
|
||||
}
|
||||
Ok((_, data)) => Ok(data),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/// Parse complete input, generate simple error message (use this for simple line input).
|
||||
pub fn parse_complete_line<'a, F, O>(what: &str, i: &'a str, parser: F) -> Result<O, Error>
|
||||
where F: Fn(&'a str) -> IResult<&'a str, O>,
|
||||
{
|
||||
match all_consuming(parser)(i) {
|
||||
Err(nom::Err::Error(VerboseError { errors })) |
|
||||
Err(nom::Err::Failure(VerboseError { errors })) => {
|
||||
if errors.is_empty() {
|
||||
bail!("unable to parse {}", what);
|
||||
} else {
|
||||
bail!("unable to parse {} at '{}' - {:?}", what, errors[0].0, errors[0].1);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
bail!("unable to parse {} - {}", what, err);
|
||||
}
|
||||
Ok((_, data)) => Ok(data),
|
||||
}
|
||||
}
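// Illustrative only: a hedged example of composing the helpers above into a tiny
// line parser for "<name> <count>" input. The grammar and the function name
// parse_name_and_count are made up; only the helpers themselves are real.
pub fn parse_name_and_count(i: &str) -> Result<(String, u64), Error> {
    parse_complete_line("name/count pair", i, |i| {
        let (i, name) = notspace1(i)?;
        let (i, _) = multispace1(i)?;
        let (i, count) = parse_u64(i)?;
        Ok((i, (name.to_string(), count)))
    })
}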
|
123
src/tools/statistics.rs
Normal file
@ -0,0 +1,123 @@
|
||||
//! Helpers for common statistics tasks
|
||||
use num_traits::NumAssignRef;
|
||||
use num_traits::cast::ToPrimitive;
|
||||
|
||||
/// Calculates the sum of a list of numbers
|
||||
/// ```
|
||||
/// # use proxmox_backup::tools::statistics::sum;
|
||||
/// # use num_traits::cast::ToPrimitive;
|
||||
///
|
||||
/// assert_eq!(sum(&[0,1,2,3,4,5]), 15);
|
||||
/// assert_eq!(sum(&[-1,1,-2,2]), 0);
|
||||
/// assert!((sum(&[0.0, 0.1,0.2]).to_f64().unwrap() - 0.3).abs() < 0.001);
|
||||
/// assert!((sum(&[0.0, -0.1,0.2]).to_f64().unwrap() - 0.1).abs() < 0.001);
|
||||
/// ```
|
||||
pub fn sum<T>(list: &[T]) -> T
|
||||
where
|
||||
T: NumAssignRef + ToPrimitive
|
||||
{
|
||||
let mut sum = T::zero();
|
||||
for num in list {
|
||||
sum += num;
|
||||
}
|
||||
sum
|
||||
}
|
||||
|
||||
/// Calculates the mean of a variable x
|
||||
/// ```
|
||||
/// # use proxmox_backup::tools::statistics::mean;
|
||||
///
|
||||
/// assert!((mean(&[0,1,2,3,4,5]).unwrap() - 2.5).abs() < 0.001);
|
||||
/// assert_eq!(mean::<u64>(&[]), None)
|
||||
/// ```
|
||||
pub fn mean<T>(list: &[T]) -> Option<f64>
|
||||
where
|
||||
T: NumAssignRef + ToPrimitive
|
||||
{
|
||||
let len = list.len();
|
||||
if len == 0 {
|
||||
return None
|
||||
}
|
||||
Some(sum(list).to_f64()?/(list.len() as f64))
|
||||
}
|
||||
|
||||
/// Calculates the variance of a variable x
|
||||
/// ```
|
||||
/// # use proxmox_backup::tools::statistics::variance;
|
||||
///
|
||||
/// assert!((variance(&[1,2,3,4]).unwrap() - 1.25).abs() < 0.001);
|
||||
/// assert_eq!(variance::<u64>(&[]), None)
|
||||
/// ```
|
||||
pub fn variance<T>(list: &[T]) -> Option<f64>
|
||||
where
|
||||
T: NumAssignRef + ToPrimitive
|
||||
{
|
||||
covariance(list, list)
|
||||
}
|
||||
|
||||
/// Calculates the (non-corrected) covariance of two variables x,y
|
||||
pub fn covariance<X, Y> (x: &[X], y: &[Y]) -> Option<f64>
|
||||
where
|
||||
X: NumAssignRef + ToPrimitive,
|
||||
Y: NumAssignRef + ToPrimitive,
|
||||
{
|
||||
let len_x = x.len();
|
||||
let len_y = y.len();
|
||||
if len_x == 0 || len_y == 0 || len_x != len_y {
|
||||
return None
|
||||
}
|
||||
|
||||
let mean_x = mean(x)?;
|
||||
let mean_y = mean(y)?;
|
||||
|
||||
let covariance: f64 = (0..len_x).map(|i| {
|
||||
let x = x[i].to_f64().unwrap_or(0.0);
|
||||
let y = y[i].to_f64().unwrap_or(0.0);
|
||||
(x - mean_x)*(y - mean_y)
|
||||
}).sum();
|
||||
|
||||
Some(covariance/(len_x as f64))
|
||||
}
|
||||
|
||||
/// Returns the factors (a,b) of a linear regression y = a + bx
|
||||
/// for the variables [x,y] or None if the lists are not the same length
|
||||
/// ```
|
||||
/// # use proxmox_backup::tools::statistics::linear_regression;
|
||||
///
|
||||
/// let x = &[0,1,2,3,4];
|
||||
/// let y = &[-4,-2,0,2,4];
|
||||
/// let (a,b) = linear_regression(x,y).unwrap();
|
||||
/// assert!((a - -4.0).abs() < 0.001);
|
||||
/// assert!((b - 2.0).abs() < 0.001);
|
||||
/// ```
|
||||
pub fn linear_regression<X, Y> (x: &[X], y: &[Y]) -> Option<(f64, f64)>
|
||||
where
|
||||
X: NumAssignRef + ToPrimitive,
|
||||
Y: NumAssignRef + ToPrimitive
|
||||
{
|
||||
let len_x = x.len();
|
||||
let len_y = y.len();
|
||||
if len_x == 0 || len_y == 0 || len_x != len_y {
|
||||
return None
|
||||
}
|
||||
|
||||
let mean_x = mean(x)?;
|
||||
let mean_y = mean(y)?;
|
||||
|
||||
let mut covariance = 0.0;
|
||||
let mut variance = 0.0;
|
||||
|
||||
for i in 0..len_x {
|
||||
let x = x[i].to_f64()?;
|
||||
let y = y[i].to_f64()?;
|
||||
|
||||
let x_mean_x = x - mean_x;
|
||||
|
||||
covariance += x_mean_x*(y - mean_y);
|
||||
variance += x_mean_x * x_mean_x;
|
||||
}
|
||||
|
||||
let beta = covariance/variance;
|
||||
let alpha = mean_y - beta*mean_x;
|
||||
Some((alpha,beta))
|
||||
}
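/// Illustrative only: a short worked example for covariance(), which (unlike the other
/// helpers above) has no doctest. For x = [1,2,3,4] and y = [2,4,6,8] the means are
/// 2.5 and 5.0, so the non-corrected covariance is (4.5 + 0.5 + 0.5 + 4.5)/4 = 2.5.
#[test]
fn test_covariance_example() {
    let x = &[1, 2, 3, 4];
    let y = &[2, 4, 6, 8];
    let cov = covariance(x, y).unwrap();
    assert!((cov - 2.5).abs() < 0.001);
}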
|