Compare commits
221 Commits
SHA1 | Author | Date | |
---|---|---|---|
e9764238df | |||
26f499b17b | |||
cc7995ac40 | |||
43abba4b4f | |||
58f950c546 | |||
c426e65893 | |||
caea8d611f | |||
7d0754a6d2 | |||
5afa0755ea | |||
40b63186a6 | |||
8f6088c130 | |||
2162e2c15d | |||
0d5ab04a90 | |||
4059285649 | |||
2e079b8bf2 | |||
4ff2c9b832 | |||
a8e2940ff3 | |||
d5d5f2174e | |||
2311238450 | |||
2ea501ffdf | |||
4eb4e94918 | |||
817bcda848 | |||
f6de2c7359 | |||
3f0b9c10ec | |||
2b66abbfab | |||
402c8861d8 | |||
3f683799a8 | |||
573bcd9a92 | |||
90779237ae | |||
1f82f9b7b5 | |||
19b5c3c43e | |||
fe3e65c3ea | |||
fdaab0df4e | |||
b957aa81bd | |||
8ea00f6e49 | |||
4bd789b0fa | |||
2f050cf2ed | |||
e22f4882e7 | |||
c65bc99a41 | |||
355c055e81 | |||
c2009e5309 | |||
23f74c190e | |||
a6f8728339 | |||
c1769a749c | |||
facd9801cf | |||
21302088de | |||
8268c9d161 | |||
b91b7d9ffd | |||
6e1f0c138f | |||
8567c0d29c | |||
d33d8f4e6a | |||
5b1cfa01f1 | |||
05d18b907a | |||
e44fe0c9f5 | |||
4cf0ced950 | |||
98425309b0 | |||
7b1e26699d | |||
676b0fde49 | |||
60f9a6ea8f | |||
1090fd4424 | |||
92c3fd2e22 | |||
e3efaa1972 | |||
0cf2b6441e | |||
d6d3b353be | |||
a67f7d0a07 | |||
c8137518fe | |||
cbef49bf4f | |||
0b99e5aebc | |||
29c55e5fc4 | |||
f386f512d0 | |||
3ddb14889a | |||
00c2327564 | |||
d79926795a | |||
c08fac4d69 | |||
c40440092d | |||
dc2ef2b54f | |||
b28253d650 | |||
f28cfb322a | |||
3bbe291c51 | |||
42d19fdf69 | |||
215968e033 | |||
eddd1a1b9c | |||
d2ce211899 | |||
1cb46c6f65 | |||
5d88c3a1c8 | |||
07fb504943 | |||
f675c5e978 | |||
4e37d9ce67 | |||
e303077132 | |||
6ef9bb59eb | |||
eeaa2c212b | |||
4a3adc3de8 | |||
abdb976340 | |||
3b62116ce6 | |||
e005f953d9 | |||
1c090810f5 | |||
e181d2f6da | |||
16021f6ab7 | |||
ba694720fc | |||
bde8e243cf | |||
3352ee5656 | |||
b29cbc414d | |||
026dc1d11f | |||
9438aca6c9 | |||
547f0c97e4 | |||
177a2de992 | |||
0686b1f4db | |||
0727e56a06 | |||
2fd3d57490 | |||
3f851d1321 | |||
1aef491e24 | |||
d0eccae37d | |||
a34154d900 | |||
c2cc32b4dd | |||
46405fa35d | |||
66af7f51bc | |||
c72ccd4e33 | |||
902b2cc278 | |||
8ecd7c9c21 | |||
7f17f7444a | |||
fb5a066500 | |||
d19c96d507 | |||
929a13b357 | |||
36c65ee0b0 | |||
3378fd9fe5 | |||
58c51cf3d9 | |||
5509b199fb | |||
bb59df9134 | |||
2564b0834f | |||
9321bbd1f5 | |||
4264e52220 | |||
6988b29bdc | |||
98c54240e6 | |||
d30c192589 | |||
67908b47fa | |||
ac7513e368 | |||
fbbcd85839 | |||
7a6b549270 | |||
0196b9bf5b | |||
739a51459a | |||
195d7c90ce | |||
6f3146c08c | |||
4b12879289 | |||
20b3094bcb | |||
df528ee6fa | |||
57e50fb906 | |||
3136792c95 | |||
3d571d5509 | |||
8e6e18b77c | |||
4d16badf6f | |||
a609cf210e | |||
1498659b4e | |||
4482f3fe11 | |||
5d85847f91 | |||
476b4acadc | |||
cf1bd08131 | |||
ec8f042459 | |||
431cc7b185 | |||
e693818afc | |||
3d68536fc2 | |||
26e78a2efb | |||
5444fa940b | |||
d4f2397d4c | |||
fab2413741 | |||
669c137fec | |||
fc6047fcb1 | |||
3014088684 | |||
144006fade | |||
b9cf6ee797 | |||
cdde66d277 | |||
239e49f927 | |||
ae66873ce9 | |||
bda48e04da | |||
ba97479848 | |||
6cad8ce4ce | |||
34020b929e | |||
33070956af | |||
da84cc52f4 | |||
9825748e5e | |||
2179359f40 | |||
9bb161c881 | |||
297e600730 | |||
ed7b3a7de2 | |||
0f358204bd | |||
ca6124d5fa | |||
7eacdc765b | |||
c443f58b09 | |||
ab1092392f | |||
1e3d9b103d | |||
386990ba09 | |||
bc853b028f | |||
d406de299b | |||
dfb31de8f0 | |||
7c3aa258f8 | |||
044055062c | |||
2b388026f8 | |||
707974fdb3 | |||
9069debcd8 | |||
fa2bdc1309 | |||
8e40aa63c1 | |||
d2522b2db6 | |||
ce8e3de401 | |||
7fa2779559 | |||
042afd6e52 | |||
ff30caeaf8 | |||
553cd12ba6 | |||
de1e1a9d95 | |||
91960d6162 | |||
4c24a48eb3 | |||
484e761dab | |||
059b7a252e | |||
1278aeec36 | |||
e53a4c4577 | |||
98ad58fbd2 | |||
98bb3b9016 | |||
eb80aac288 | |||
c26aad405f | |||
f03a0e509e | |||
4c1e8855cc | |||
85a9a5b68c | |||
f856e0774e |
Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.2.3"
+version = "0.5.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -30,15 +30,20 @@ lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
 nix = "0.16"
+num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-proxmox = { version = "0.1.38", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.1"
+proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+proxmox-fuse = "0.1.0"
+pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
 serde = { version = "1.0", features = ["derive"] }
TODO.rst (2)
@@ -30,8 +30,6 @@ Chores:
 
 * move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/
 
-* recompute PXAR_ header types from strings: avoid using numbers from casync
-
 * remove pbs-* systemd timers and services on package purge
 
 
debian/changelog (28)
@@ -1,3 +1,31 @@
+rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
+
+  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
+
+  * ui: allow to Forget (delete) backup snapshots
+
+  * pxar: deal with files changing size during archiving
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200
+
+rust-proxmox-backup (0.4.0-1) unstable; urgency=medium
+
+  * change api for incremental backups mode
+
+  * zfs disk management gui
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200
+
+rust-proxmox-backup (0.3.0-1) unstable; urgency=medium
+
+  * support incremental backups mode
+
+  * new disk management
+
+  * single file restore for container backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Jun 2020 10:12:57 +0200
+
 rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
 
   * tools/systemd/time: fix compute_next_event for weekdays
debian/postinst (new file, 28)
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+case "$1" in
+    configure)
+        # modeled after dh_systemd_start output
+        systemctl --system daemon-reload >/dev/null || true
+        if [ -n "$2" ]; then
+            _dh_action=try-reload-or-restart
+        else
+            _dh_action=start
+        fi
+        deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+exit 0
debian/prerm (new file, 10)
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+# modeled after dh_systemd_start output
+if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
+    deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
+fi
debian/rules (6)
@@ -37,9 +37,9 @@ override_dh_auto_install:
 	    PROXY_USER=backup \
 	    LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
 
-override_dh_installinit:
-	dh_installinit
-	dh_installinit --name proxmox-backup-proxy
+override_dh_installsystemd:
+	# note: we start/try-reload-restart services manually in postinst
+	dh_installsystemd --no-start --no-restart-after-upgrade
 
 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
proxmox-backup-proxy.service
@@ -2,7 +2,7 @@
 Description=Proxmox Backup API Proxy Server
 Wants=network-online.target
 After=network.target
-Requires=proxmox-backup.service
+Wants=proxmox-backup.service
 After=proxmox-backup.service
 
 [Service]
examples/download-speed.rs
@@ -44,8 +44,8 @@ async fn run() -> Result<(), Error> {
 
     let mut bytes = 0;
     for _ in 0..100 {
-        let writer = DummyWriter { bytes: 0 };
-        let writer = client.speedtest(writer).await?;
+        let mut writer = DummyWriter { bytes: 0 };
+        client.speedtest(&mut writer).await?;
         println!("Received {} bytes", writer.bytes);
         bytes += writer.bytes;
     }
@@ -59,8 +59,7 @@ async fn run() -> Result<(), Error> {
     Ok(())
 }
 
-#[tokio::main]
-async fn main() {
+fn main() {
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("ERROR: {}", err);
     }
examples/upload-speed.rs
@@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {
 
     let backup_time = chrono::Utc::now();
 
-    let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
 
     println!("start upload speed test");
     let res = client.upload_speedtest().await?;
src/api2.rs
@@ -5,9 +5,11 @@ pub mod config;
 pub mod node;
 pub mod reader;
 mod subscription;
+pub mod status;
 pub mod types;
 pub mod version;
 pub mod pull;
+mod helpers;
 
 use proxmox::api::router::SubdirMap;
 use proxmox::api::Router;
@@ -23,6 +25,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("nodes", &NODES_ROUTER),
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
+    ("status", &status::ROUTER),
     ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
src/api2/admin/datastore.rs
@@ -1,8 +1,8 @@
 use std::collections::{HashSet, HashMap};
-use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
 
-use chrono::{TimeZone, Local};
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
@@ -13,17 +13,21 @@ use proxmox::api::{
     RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::try_block;
 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
 
+use pxar::accessor::aio::Accessor;
+use pxar::EntryKind;
+
 use crate::api2::types::*;
+use crate::api2::node::rrd::create_value_from_rrd;
 use crate::backup::*;
 use crate::config::datastore;
 use crate::config::cached_user_info::CachedUserInfo;
 
 use crate::server::WorkerTask;
-use crate::tools;
+use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
 use crate::config::acl::{
     PRIV_DATASTORE_AUDIT,
     PRIV_DATASTORE_MODIFY,
@@ -42,32 +46,45 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
 
 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
 
-    let mut path = store.base_path();
-    path.push(backup_dir.relative_path());
-    path.push(MANIFEST_BLOB_NAME);
-
-    let raw_data = file_get_contents(&path)?;
-    let index_size = raw_data.len() as u64;
-    let blob = DataBlob::from_raw(raw_data)?;
-
-    let manifest = BackupManifest::try_from(blob)?;
+    let (manifest, index_size) = store.load_manifest(backup_dir)?;
 
     let mut result = Vec::new();
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
+            encrypted: item.encrypted,
             size: Some(item.size),
         });
     }
 
     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
+        encrypted: Some(false),
         size: Some(index_size),
     });
 
     Ok(result)
 }
 
+fn get_all_snapshot_files(
+    store: &DataStore,
+    info: &BackupInfo,
+) -> Result<Vec<BackupContent>, Error> {
+    let mut files = read_backup_index(&store, &info.backup_dir)?;
+
+    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
+        acc.insert(item.filename.clone());
+        acc
+    });
+
+    for file in &info.files {
+        if file_set.contains(file) { continue; }
+        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+    }
+
+    Ok(files)
+}
+
 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
 
     let mut group_hash = HashMap::new();
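The new `get_all_snapshot_files` helper merges the manifest listing with any on-disk files the manifest does not mention; the dedup step is a plain fold of the filenames into a `HashSet`. A minimal standalone sketch of that pattern, using only the standard library and illustrative names (not the project's `BackupContent` type or real archive names):

```rust
use std::collections::HashSet;

// Merge a primary listing with a secondary one, skipping names already present.
fn merge_listings(mut primary: Vec<String>, secondary: &[String]) -> Vec<String> {
    // Fold the known names into a set for O(1) duplicate checks.
    let seen = primary.iter().fold(HashSet::new(), |mut acc, name| {
        acc.insert(name.clone());
        acc
    });

    for name in secondary {
        if seen.contains(name) { continue; }
        primary.push(name.clone());
    }
    primary
}

fn main() {
    let from_manifest = vec!["root.pxar.didx".to_string(), "index.json.blob".to_string()];
    let on_disk = vec!["root.pxar.didx".to_string(), "client.log.blob".to_string()];
    let merged = merge_listings(from_manifest, &on_disk);
    assert_eq!(merged.len(), 3); // the duplicate entry is only kept once
}
```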
@@ -201,21 +218,9 @@ pub fn list_snapshot_files(
     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
     if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
 
-    let mut files = read_backup_index(&datastore, &snapshot)?;
-
     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
 
-    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
-        acc.insert(item.filename.clone());
-        acc
-    });
-
-    for file in info.files {
-        if file_set.contains(&file) { continue; }
-        files.push(BackupContent { filename: file, size: None });
-    }
-
-    Ok(files)
+    get_all_snapshot_files(&datastore, &info)
 }
 
 #[api(
@@ -336,25 +341,28 @@ pub fn list_snapshots (
             if owner != username { continue; }
         }
 
-        let mut result_item = SnapshotListItem {
+        let mut size = None;
+
+        let files = match get_all_snapshot_files(&datastore, &info) {
+            Ok(files) => {
+                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
+                files
+            },
+            Err(err) => {
+                eprintln!("error during snapshot file listing: '{}'", err);
+                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+            },
+        };
+
+        let result_item = SnapshotListItem {
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
-            files: info.files,
-            size: None,
+            files,
+            size,
             owner: Some(owner),
         };
 
-        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
-            let mut backup_size = 0;
-            for item in index.iter() {
-                if let Some(item_size) = item.size {
-                    backup_size += item_size;
-                }
-            }
-            result_item.size = Some(backup_size);
-        }
-
         snapshots.push(result_item);
     }
 
@@ -382,25 +390,92 @@ pub fn status(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<StorageStatus, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+    crate::tools::disks::disk_usage(&datastore.base_path())
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backup from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
 
-    let base_path = datastore.base_path();
+    let worker_id;
 
-    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
+    let mut backup_dir = None;
+    let mut backup_group = None;
 
-    use nix::NixPath;
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+            let group = BackupGroup::new(backup_type, backup_id);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            worker_id = store.clone();
+        }
+        _ => bail!("parameters do not spefify a backup group or snapshot"),
+    }
 
-    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
-    nix::errno::Errno::result(res)?;
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
-    let bsize = stat.f_bsize as u64;
+    let upid_str = WorkerTask::new_thread(
+        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
+    {
+        let success = if let Some(backup_dir) = backup_dir {
+            verify_backup_dir(&datastore, &backup_dir, &worker)?
+        } else if let Some(backup_group) = backup_group {
+            verify_backup_group(&datastore, &backup_group, &worker)?
+        } else {
+            verify_all_backups(&datastore, &worker)?
+        };
+        if !success {
+            bail!("verfication failed - please check the log for details");
+        }
+        Ok(())
+    })?;
 
-    Ok(StorageStatus {
-        total: stat.f_blocks*bsize,
-        used: (stat.f_blocks-stat.f_bfree)*bsize,
-        avail: stat.f_bavail*bsize,
-    })
+    Ok(json!(upid_str))
 }
 
 #[macro_export]
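The new `verify` endpoint derives its worker id from the given parameters; for a single snapshot it appends the backup time as zero-padded upper-case hex (`{:08X}`). A small standalone illustration of that formatting (the store, type, id, and epoch values below are made up):

```rust
fn main() {
    // Same format string as the single-snapshot branch of verify() above.
    let backup_time: i64 = 1_593_430_854; // an arbitrary example epoch value
    let worker_id = format!("{}_{}_{}_{:08X}", "store1", "vm", "100", backup_time);
    assert_eq!(worker_id, "store1_vm_100_5EF9D346");
}
```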
@@ -752,19 +827,22 @@ fn download_file(
         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
         if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
 
-        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
-                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
 
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
         path.push(&file_name);
 
-        let file = tokio::fs::File::open(path)
+        let file = tokio::fs::File::open(&path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;
 
         let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+            .map_err(move |err| {
+                eprintln!("error during streaming of '{:?}' - {}", &path, err);
+                err
+            });
         let body = Body::wrap_stream(payload);
 
         // fixme: set other headers ?
@@ -776,6 +854,118 @@ fn download_file(
     }.boxed()
 }
 
+#[sortable]
+pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_file_decoded),
+    &ObjectSchema::new(
+        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn download_file_decoded(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(store)?;
+
+        let username = rpcenv.get_user().unwrap();
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+        let files = read_backup_index(&datastore, &backup_dir)?;
+        for file in files {
+            if file.filename == file_name && file.encrypted == Some(true) {
+                bail!("cannot decode '{}' - is encrypted", file_name);
+            }
+        }
+
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(&file_name);
+
+        let extension = file_name.rsplitn(2, '.').next().unwrap();
+
+        let body = match extension {
+            "didx" => {
+                let index = DynamicIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::new(reader)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "fidx" => {
+                let index = FixedIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "blob" => {
+                let file = std::fs::File::open(&path)
+                    .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
+
+                Body::wrap_stream(
+                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
+                        .map_err(move |err| {
+                            eprintln!("error during streaming of '{:?}' - {}", path, err);
+                            err
+                        })
+                )
+            },
+            extension => {
+                bail!("cannot download '{}' files", extension);
+            },
+        };
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
 #[sortable]
 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&upload_backup_log),
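The new `download_file_decoded` handler dispatches on the archive extension taken with `rsplitn(2, '.')`, which yields the part after the last dot first (or the whole name if there is no dot). A standalone illustration of that behaviour with made-up file names:

```rust
fn extension(file_name: &str) -> &str {
    // rsplitn(2, '.') splits from the right at most once, so next() is the
    // text after the last '.'; for a dot-less name it is the whole name.
    file_name.rsplitn(2, '.').next().unwrap()
}

fn main() {
    assert_eq!(extension("root.pxar.didx"), "didx");
    assert_eq!(extension("disk-0.img.fidx"), "fidx");
    assert_eq!(extension("index.json.blob"), "blob");
    assert_eq!(extension("noextension"), "noextension");
}
```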
@@ -846,6 +1036,212 @@ fn upload_backup_log(
     }.boxed()
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            "filepath": {
+                description: "Base64 encoded path.",
+                type: String,
+            }
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get the entries of the given path of the catalog
+fn catalog(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    filepath: String,
+    _param: Value,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let username = rpcenv.get_user().unwrap();
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+    let mut path = datastore.base_path();
+    path.push(backup_dir.relative_path());
+    path.push(CATALOG_NAME);
+
+    let index = DynamicIndexReader::open(&path)
+        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let reader = BufferedDynamicReader::new(index, chunk_reader);
+
+    let mut catalog_reader = CatalogReader::new(reader);
+    let mut current = catalog_reader.root()?;
+    let mut components = vec![];
+
+
+    if filepath != "root" {
+        components = base64::decode(filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+        for component in components.split(|c| *c == '/' as u8) {
+            if let Some(entry) = catalog_reader.lookup(&current, component)? {
+                current = entry;
+            } else {
+                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
+            }
+        }
+    }
+
+    let mut res = Vec::new();
+
+    for direntry in catalog_reader.read_dir(&current)? {
+        let mut components = components.clone();
+        components.push('/' as u8);
+        components.extend(&direntry.name);
+        let path = base64::encode(components);
+        let text = String::from_utf8_lossy(&direntry.name);
+        let mut entry = json!({
+            "filepath": path,
+            "text": text,
+            "type": CatalogEntryType::from(&direntry.attr).to_string(),
+            "leaf": true,
+        });
+        match direntry.attr {
+            DirEntryAttribute::Directory { start: _ } => {
+                entry["leaf"] = false.into();
+            },
+            DirEntryAttribute::File { size, mtime } => {
+                entry["size"] = size.into();
+                entry["mtime"] = mtime.into();
+            },
+            _ => {},
+        }
+        res.push(entry);
+    }
+
+    Ok(res.into())
+}
+
+#[sortable]
+pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&pxar_file_download),
+    &ObjectSchema::new(
+        "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn pxar_file_download(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(&store)?;
+
+        let username = rpcenv.get_user().unwrap();
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+
+        let mut components = base64::decode(&filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+
+        let mut split = components.splitn(2, |c| *c == '/' as u8);
+        let pxar_name = split.next().unwrap();
+        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+
+        path.push(OsStr::from_bytes(&pxar_name));
+
+        let index = DynamicIndexReader::open(&path)
+            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+        let chunk_reader = LocalChunkReader::new(datastore, None);
+        let reader = BufferedDynamicReader::new(index, chunk_reader);
+        let archive_size = reader.archive_size();
+        let reader = LocalDynamicReadAt::new(reader);
+
+        let decoder = Accessor::new(reader, archive_size).await?;
+        let root = decoder.open_root().await?;
+        let file = root
+            .lookup(OsStr::from_bytes(file_path)).await?
+            .ok_or(format_err!("error opening '{:?}'", file_path))?;
+
+        let file = match file.kind() {
+            EntryKind::File { .. } => file,
+            EntryKind::Hardlink(_) => {
+                decoder.follow_hardlink(&file).await?
+            },
+            // TODO symlink
+            other => bail!("cannot download file of type {:?}", other),
+        };
+
+        let body = Body::wrap_stream(
+            AsyncReaderStream::new(file.contents().await?)
+                .map_err(move |err| {
+                    eprintln!("error during streaming of '{:?}' - {}", filepath, err);
+                    err
+                })
+        );
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
 #[api(
     input: {
         properties: {
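Both the `catalog` and `pxar-file-download` endpoints added above accept a base64-encoded byte path, strip a leading '/' and then walk the '/'-separated components. A minimal standalone sketch of that decoding step, using the same `base64` crate calls the handlers use (the function name and example path are illustrative, not part of the project):

```rust
fn decode_filepath(filepath: &str) -> Result<Vec<Vec<u8>>, base64::DecodeError> {
    let mut bytes = base64::decode(filepath)?;
    // Drop a leading '/' so the first component is not empty.
    if !bytes.is_empty() && bytes[0] == b'/' {
        bytes.remove(0);
    }
    Ok(bytes.split(|c| *c == b'/').map(|c| c.to_vec()).collect())
}

fn main() {
    let encoded = base64::encode("/etc/hostname"); // example path only
    let components = decode_filepath(&encoded).unwrap();
    assert_eq!(components, vec![b"etc".to_vec(), b"hostname".to_vec()]);
}
```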
@@ -872,10 +1268,8 @@ fn get_rrd_stats(
     _param: Value,
 ) -> Result<Value, Error> {
 
-    let rrd_dir = format!("datastore/{}", store);
-
-    crate::rrd::extract_data(
-        &rrd_dir,
+    create_value_from_rrd(
+        &format!("datastore/{}", store),
         &[
             "total", "used",
             "read_ios", "read_bytes",
@@ -889,11 +1283,21 @@ fn get_rrd_stats(
 
 #[sortable]
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
+    (
+        "catalog",
+        &Router::new()
+            .get(&API_METHOD_CATALOG)
+    ),
     (
         "download",
         &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
     ),
+    (
+        "download-decoded",
+        &Router::new()
+            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
+    ),
     (
         "files",
         &Router::new()
@@ -915,6 +1319,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
            .post(&API_METHOD_PRUNE)
     ),
+    (
+        "pxar-file-download",
+        &Router::new()
+            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
+    ),
     (
         "rrd",
         &Router::new()
@@ -936,6 +1345,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
     ),
+    (
+        "verify",
+        &Router::new()
+            .post(&API_METHOD_VERIFY)
+    ),
 ];
 
 const DATASTORE_INFO_ROUTER: Router = Router::new()
src/api2/backup.rs
@@ -10,7 +10,7 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 
-use crate::tools::{self, WrappedReaderStream};
+use crate::tools;
 use crate::server::{WorkerTask, H2Service};
 use crate::backup::*;
 use crate::api2::types::*;
@@ -199,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "dynamic_index", &Router::new()
-            .download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
             .put(&API_METHOD_DYNAMIC_APPEND)
     ),
@@ -222,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "fixed_index", &Router::new()
-            .download(&API_METHOD_FIXED_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_FIXED_INDEX)
             .put(&API_METHOD_FIXED_APPEND)
     ),
+    (
+        "previous", &Router::new()
+            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+    ),
     (
         "speedtest", &Router::new()
             .upload(&API_METHOD_UPLOAD_SPEEDTEST)
@@ -284,6 +286,8 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
                 .minimum(1)
                 .schema()
             ),
+            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
+                csum and reuse index for incremental backup if it matches.").schema()),
         ]),
     )
 );
@@ -296,10 +300,9 @@ fn create_fixed_index(
 
     let env: &BackupEnvironment = rpcenv.as_ref();
 
-    println!("PARAM: {:?}", param);
-
     let name = tools::required_string_param(&param, "archive-name")?.to_owned();
     let size = tools::required_integer_param(&param, "size")? as usize;
+    let reuse_csum = param["reuse-csum"].as_str();
 
     let archive_name = name.clone();
     if !archive_name.ends_with(".fidx") {
@@ -307,12 +310,49 @@ fn create_fixed_index(
     }
 
     let mut path = env.backup_dir.relative_path();
-    path.push(archive_name);
+    path.push(&archive_name);
 
     let chunk_size = 4096*1024; // todo: ??
 
-    let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
-    let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;
+    // do incremental backup if csum is set
+    let mut reader = None;
+    let mut incremental = false;
+    if let Some(csum) = reuse_csum {
+        incremental = true;
+        let last_backup = match &env.last_backup {
+            Some(info) => info,
+            None => {
+                bail!("cannot reuse index - no previous backup exists");
+            }
+        };
+
+        let mut last_path = last_backup.backup_dir.relative_path();
+        last_path.push(&archive_name);
+
+        let index = match env.datastore.open_fixed_reader(last_path) {
+            Ok(index) => index,
+            Err(_) => {
+                bail!("cannot reuse index - no previous backup exists for archive");
+            }
+        };
+
+        let (old_csum, _) = index.compute_csum();
+        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
+        if old_csum != csum {
+            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
+                csum, old_csum);
+        }
+
+        reader = Some(index);
+    }
+
+    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
+
+    if let Some(reader) = reader {
+        writer.clone_data_from(&reader)?;
+    }
+
+    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;
 
     env.log(format!("created new fixed index {} ({:?})", wid, path));
 
@@ -520,15 +560,15 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
             (
                 "chunk-count",
                 false,
-                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
-                    .minimum(1)
+                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
+                    .minimum(0)
                     .schema()
             ),
             (
                 "size",
                 false,
-                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
-                    .minimum(1)
+                &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
+                    .minimum(0)
                     .schema()
             ),
             ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
@@ -572,20 +612,17 @@ fn finish_backup (
 }
 
 #[sortable]
-pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&dynamic_chunk_index),
+pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_previous),
     &ObjectSchema::new(
-        r###"
-Download the dynamic chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
+        "Download archive from previous backup.",
         &sorted!([
             ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
         ]),
     )
 );
 
-fn dynamic_chunk_index(
+fn download_previous(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -598,130 +635,38 @@ fn dynamic_chunk_index(
 
         let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
 
-        if !archive_name.ends_with(".didx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
         let last_backup = match &env.last_backup {
             Some(info) => info,
-            None => return Ok(empty_response),
+            None => bail!("no previous backup"),
         };
 
-        let mut path = last_backup.backup_dir.relative_path();
+        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
         path.push(&archive_name);
 
-        let index = match env.datastore.open_dynamic_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
+        {
+            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
+                ArchiveType::FixedIndex => {
+                    let index = env.datastore.open_fixed_reader(&path)?;
+                    Some(Box::new(index))
+                }
+                ArchiveType::DynamicIndex => {
+                    let index = env.datastore.open_dynamic_reader(&path)?;
+                    Some(Box::new(index))
+                }
+                _ => { None }
+            };
+            if let Some(index) = index {
+                env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+
+                for pos in 0..index.index_count() {
+                    let info = index.chunk_info(pos).unwrap();
+                    let size = info.range.end - info.range.start;
+                    env.register_chunk(info.digest, size as u32)?;
+                }
             }
-        };
-
-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        for pos in 0..count {
-            let (start, end, digest) = index.chunk_info(pos)?;
-            let size = (end - start) as u32;
-            env.register_chunk(digest, size)?;
         }
 
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
-    }.boxed()
-}
-
-#[sortable]
-pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&fixed_chunk_index),
-    &ObjectSchema::new(
-        r###"
-Download the fixed chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
-        &sorted!([
-            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
-        ]),
-    )
-);
-
-fn fixed_chunk_index(
-    _parts: Parts,
-    _req_body: Body,
-    param: Value,
-    _info: &ApiMethod,
-    rpcenv: Box<dyn RpcEnvironment>,
-) -> ApiResponseFuture {
-
-    async move {
-        let env: &BackupEnvironment = rpcenv.as_ref();
-
-        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
-
-        if !archive_name.ends_with(".fidx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
-        let last_backup = match &env.last_backup {
-            Some(info) => info,
-            None => return Ok(empty_response),
-        };
-
-        let mut path = last_backup.backup_dir.relative_path();
-        path.push(&archive_name);
-
-        let index = match env.datastore.open_fixed_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
-            }
-        };
-
-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        let image_size = index.index_bytes();
-        for pos in 0..count {
-            let digest = index.index_digest(pos).unwrap();
-            // Note: last chunk can be smaller
-            let start = (pos*index.chunk_size) as u64;
-            let mut end = start + index.chunk_size as u64;
-            if end > image_size { end = image_size; }
-            let size = (end - start) as u32;
-            env.register_chunk(*digest, size)?;
-        }
-
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
+        env.log(format!("download '{}' from previous backup.", archive_name));
+        crate::api2::helpers::create_download_response(path).await
     }.boxed()
 }
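The new `download_previous` handler decides how to register chunks by the archive type of the requested name; the project's `archive_type` helper is not part of this diff, but the extension convention it relies on (.fidx for fixed indexes, .didx for dynamic indexes, .blob for blobs) can be sketched as follows (illustrative standalone code, not the project's implementation):

```rust
#[derive(Debug, PartialEq)]
enum ArchiveType { FixedIndex, DynamicIndex, Blob }

fn archive_type(name: &str) -> Option<ArchiveType> {
    // Map the well-known archive suffixes onto the three archive kinds.
    if name.ends_with(".fidx") { Some(ArchiveType::FixedIndex) }
    else if name.ends_with(".didx") { Some(ArchiveType::DynamicIndex) }
    else if name.ends_with(".blob") { Some(ArchiveType::Blob) }
    else { None }
}

fn main() {
    assert_eq!(archive_type("disk-0.img.fidx"), Some(ArchiveType::FixedIndex));
    assert_eq!(archive_type("root.pxar.didx"), Some(ArchiveType::DynamicIndex));
    assert_eq!(archive_type("index.json.blob"), Some(ArchiveType::Blob));
    assert_eq!(archive_type("unknown.txt"), None);
}
```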
@@ -47,6 +47,7 @@ struct FixedWriterState
     chunk_count: u64,
     small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
     upload_stat: UploadStatistic,
+    incremental: bool,
 }

 struct SharedBackupState {

@@ -237,7 +238,7 @@ impl BackupEnvironment
     }

     /// Store the writer with an unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
+    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();

         state.ensure_unfinished()?;

@@ -245,7 +246,7 @@ impl BackupEnvironment
         let uid = state.next_uid();

         state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
+            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
         });

         Ok(uid)

@@ -310,7 +311,13 @@ impl BackupEnvironment
         self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));

-        let client_side_duplicates = chunk_count - upload_stat.count;
+        // account for zero chunk, which might be uploaded but never used
+        let client_side_duplicates = if chunk_count < upload_stat.count {
+            0
+        } else {
+            chunk_count - upload_stat.count
+        };

         let server_side_duplicates = upload_stat.duplicates;

         if (client_side_duplicates + server_side_duplicates) > 0 {

@@ -373,21 +380,22 @@ impl BackupEnvironment
             bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
         }

-        let expected_count = data.index.index_length();
+        if !data.incremental {
+            let expected_count = data.index.index_length();

-        if chunk_count != (expected_count as u64) {
-            bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
-        }
+            if chunk_count != (expected_count as u64) {
+                bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+            }

-        if size != (data.size as u64) {
-            bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            if size != (data.size as u64) {
+                bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            }
         }

         let uuid = data.index.uuid;

         let expected_csum = data.index.close()?;

-        println!("server checksum {:?} client: {:?}", expected_csum, csum);
+        println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
         if csum != expected_csum {
             bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
         }

@@ -430,8 +438,6 @@ impl BackupEnvironment
         state.ensure_unfinished()?;

-        state.finished = true;
-
         if state.dynamic_writers.len() != 0 {
             bail!("found open index writer - unable to finish backup");
         }

@@ -440,6 +446,8 @@ impl BackupEnvironment
             bail!("backup does not contain valid files (file count == 0)");
         }

+        state.finished = true;
+
         Ok(())
     }
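Note on the duplicate accounting above: the if/else guards against a u64 underflow when the client uploaded more chunks than the index references (the zero chunk case mentioned in the comment). A minimal, equivalent sketch using saturating arithmetic (illustration only, not what the patch uses):

    // clamps at zero instead of wrapping around on u64 subtraction
    let client_side_duplicates = chunk_count.saturating_sub(upload_stat.count);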
src/api2/helpers.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
use std::path::PathBuf;
use anyhow::Error;
use futures::*;
use hyper::{Body, Response, StatusCode, header};
use proxmox::http_err;

pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
    let file = tokio::fs::File::open(path.clone())
        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
        .await?;

    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));

    let body = Body::wrap_stream(payload);

    // fixme: set other headers ?
    Ok(Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, "application/octet-stream")
        .body(body)
        .unwrap())
}
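A minimal sketch of how a handler might call the new helper (the path and function name here are hypothetical, only create_download_response is from the patch):

    use std::path::PathBuf;
    use crate::api2::helpers;

    // inside some async API handler
    async fn download_example() -> Result<hyper::Response<hyper::Body>, anyhow::Error> {
        let path = PathBuf::from("/tmp/example.bin"); // hypothetical file
        helpers::create_download_response(path).await
    }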
@@ -9,9 +9,11 @@ mod syslog;
 mod journal;
 mod services;
 mod status;
-mod rrd;
+pub(crate) mod rrd;
+pub mod disks;

 pub const SUBDIRS: SubdirMap = &[
+    ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
     ("network", &network::ROUTER),
src/api2/node/disks.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::router::{Router, SubdirMap};
use proxmox::{sortable, identity};
use proxmox::{list_subdirs_api_method};

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    DiskUsageInfo, DiskUsageType, DiskManage, SmartData,
    get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
};
use crate::server::WorkerTask;

use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

pub mod directory;
pub mod zfs;

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            skipsmart: {
                description: "Skip smart checks.",
                type: bool,
                optional: true,
                default: false,
            },
            "usage-type": {
                type: DiskUsageType,
                optional: true,
            },
        },
    },
    returns: {
        description: "Local disk list.",
        type: Array,
        items: {
            type: DiskUsageInfo,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List local disks
pub fn list_disks(
    skipsmart: bool,
    usage_type: Option<DiskUsageType>,
) -> Result<Vec<DiskUsageInfo>, Error> {

    let mut list = Vec::new();

    for (_, info) in get_disks(None, skipsmart)? {
        if let Some(ref usage_type) = usage_type {
            if info.used == *usage_type {
                list.push(info);
            }
        } else {
            list.push(info);
        }
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            healthonly: {
                description: "If true returns only the health status.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        type: SmartData,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// Get SMART attributes and health of a disk.
pub fn smart_status(
    disk: String,
    healthonly: Option<bool>,
) -> Result<SmartData, Error> {

    let healthonly = healthonly.unwrap_or(false);

    let manager = DiskManage::new();
    let disk = manager.disk_by_name(&disk)?;
    get_smart_data(&disk, healthonly)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            uuid: {
                description: "UUID for the GPT table.",
                type: String,
                optional: true,
                max_length: 36,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Initialize empty Disk with GPT
pub fn initialize_disk(
    disk: String,
    uuid: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let info = get_disk_usage_info(&disk, true)?;

    if info.used != DiskUsageType::Unused {
        bail!("disk '{}' is already in use.", disk);
    }

    let upid_str = WorkerTask::new_thread(
        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("initialize disk {}", disk));

            let disk_manager = DiskManage::new();
            let disk_info = disk_manager.disk_by_name(&disk)?;

            inititialize_gpt_disk(&disk_info, uuid.as_deref())?;

            Ok(())
        })?;

    Ok(json!(upid_str))
}

#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
//    ("lvm", &lvm::ROUTER),
    ("directory", &directory::ROUTER),
    ("zfs", &zfs::ROUTER),
    (
        "initgpt", &Router::new()
            .post(&API_METHOD_INITIALIZE_DISK)
    ),
    (
        "list", &Router::new()
            .get(&API_METHOD_LIST_DISKS)
    ),
    (
        "smart", &Router::new()
            .get(&API_METHOD_SMART_STATUS)
    ),
]);

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
src/api2/node/disks/directory.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
use anyhow::{bail, Error};
use serde_json::json;
use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
use proxmox::api::section_config::SectionConfigData;
use proxmox::api::router::Router;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    DiskManage, FileSystemType, DiskUsageType,
    create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
};
use crate::tools::systemd::{self, types::*};

use crate::server::WorkerTask;

use crate::api2::types::*;

#[api(
    properties: {
        "filesystem": {
            type: FileSystemType,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Datastore mount info.
pub struct DatastoreMountInfo {
    /// The path of the mount unit.
    pub unitfile: String,
    /// The mount path.
    pub path: String,
    /// The mounted device.
    pub device: String,
    /// File system type
    pub filesystem: Option<String>,
    /// Mount options
    pub options: Option<String>,
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        }
    },
    returns: {
        description: "List of systemd datastore mount units.",
        type: Array,
        items: {
            type: DatastoreMountInfo,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List systemd datastore mount units.
pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {

    lazy_static::lazy_static! {
        static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
    }

    let mut list = Vec::new();

    let basedir = "/etc/systemd/system";
    for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
        let item = item?;
        let name = item.file_name().to_string_lossy().to_string();

        let unitfile = format!("{}/{}", basedir, name);
        let config = systemd::config::parse_systemd_mount(&unitfile)?;
        let data: SystemdMountSection = config.lookup("Mount", "Mount")?;

        list.push(DatastoreMountInfo {
            unitfile,
            device: data.What,
            path: data.Where,
            filesystem: data.Type,
            options: data.Options,
        });
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
            disk: {
                schema: BLOCKDEVICE_NAME_SCHEMA,
            },
            "add-datastore": {
                description: "Configure a datastore using the directory.",
                type: bool,
                optional: true,
            },
            filesystem: {
                type: FileSystemType,
                optional: true,
            },
        }
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.".
pub fn create_datastore_disk(
    name: String,
    disk: String,
    add_datastore: Option<bool>,
    filesystem: Option<FileSystemType>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let info = get_disk_usage_info(&disk, true)?;

    if info.used != DiskUsageType::Unused {
        bail!("disk '{}' is already in use.", disk);
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("create datastore '{}' on disk {}", name, disk));

            let add_datastore = add_datastore.unwrap_or(false);
            let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);

            let manager = DiskManage::new();

            let disk = manager.clone().disk_by_name(&disk)?;

            let partition = create_single_linux_partition(&disk)?;
            create_file_system(&partition, filesystem)?;

            let uuid = get_fs_uuid(&partition)?;
            let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);

            let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;

            systemd::reload_daemon()?;
            systemd::enable_unit(&mount_unit_name)?;
            systemd::start_unit(&mount_unit_name)?;

            if add_datastore {
                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
            }

            Ok(())
        })?;

    Ok(upid_str)
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
    .post(&API_METHOD_CREATE_DATASTORE_DISK);


fn create_datastore_mount_unit(
    datastore_name: &str,
    fs_type: FileSystemType,
    what: &str,
) -> Result<(String, String), Error> {

    let mount_point = format!("/mnt/datastore/{}", datastore_name);
    let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
    mount_unit_name.push_str(".mount");

    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);

    let unit = SystemdUnitSection {
        Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
        ..Default::default()
    };

    let install = SystemdInstallSection {
        WantedBy: Some(vec!["multi-user.target".to_string()]),
        ..Default::default()
    };

    let mount = SystemdMountSection {
        What: what.to_string(),
        Where: mount_point.clone(),
        Type: Some(fs_type.to_string()),
        Options: Some(String::from("defaults")),
        ..Default::default()
    };

    let mut config = SectionConfigData::new();
    config.set_data("Unit", "Unit", unit)?;
    config.set_data("Install", "Install", install)?;
    config.set_data("Mount", "Mount", mount)?;

    systemd::config::save_systemd_mount(&mount_unit_path, &config)?;

    Ok((mount_unit_name, mount_point))
}
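A rough usage sketch of the private helper above, for a hypothetical datastore "store1" (the datastore name and the UUID placeholder are illustrative, not part of the patch); the returned unit name follows systemd path escaping of the mount point:

    // Sketch: build the mount unit for a hypothetical datastore "store1".
    let (unit_name, mount_point) = create_datastore_mount_unit(
        "store1", FileSystemType::Ext4, "/dev/disk/by-uuid/<uuid>")?;
    // unit_name   == "mnt-datastore-store1.mount"  (matches MOUNT_NAME_REGEX above)
    // mount_point == "/mnt/datastore/store1"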
src/api2/node/disks/zfs.rs (new file, 380 lines)
@@ -0,0 +1,380 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use proxmox::api::{
    api, Permission, RpcEnvironment, RpcEnvironmentType,
    schema::{
        Schema,
        StringSchema,
        ArraySchema,
        IntegerSchema,
        ApiStringFormat,
        parse_property_string,
    },
};
use proxmox::api::router::Router;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::tools::disks::{
    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
    DiskUsageType,
};

use crate::server::WorkerTask;

use crate::api2::types::*;

pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
    .schema();

pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of disk names, comma separated.")
    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
    .schema();

pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    "Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();


#[api(
    default: "On",
)]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}


#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
    /// zpool name
    pub name: String,
    /// Health
    pub health: String,
    /// Total size
    pub size: u64,
    /// Used size
    pub alloc: u64,
    /// Free space
    pub free: u64,
    /// ZFS fragnentation level
    pub frag: u64,
    /// ZFS deduplication ratio
    pub dedup: f64,
}


#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "List of zpools.",
        type: Array,
        items: {
            type: ZpoolListItem,
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// List zfs pools.
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {

    let data = zpool_list(None, false)?;

    let mut list = Vec::new();

    for item in data {
        if let Some(usage) = item.usage {
            list.push(ZpoolListItem {
                name: item.name,
                health: item.health,
                size: usage.size,
                alloc: usage.alloc,
                free: usage.free,
                frag: usage.frag,
                dedup: usage.dedup,
            });
        }
    }

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        description: "zpool vdev tree with status",
        properties: {

        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
    },
)]
/// Get zpool status details.
pub fn zpool_details(
    name: String,
) -> Result<Value, Error> {

    let key_value_list = zpool_status(&name)?;

    let config = match key_value_list.iter().find(|(k, _)| k == "config") {
        Some((_, v)) => v,
        None => bail!("got zpool status without config key"),
    };

    let vdev_list = parse_zpool_status_config_tree(config)?;
    let mut tree = vdev_list_to_tree(&vdev_list)?;

    for (k, v) in key_value_list {
        if k != "config" {
            tree[k] = v.into();
        }
    }

    tree["name"] = tree.as_object_mut().unwrap()
        .remove("pool")
        .unwrap_or_else(|| name.into());


    Ok(tree)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
            devices: {
                schema: DISK_LIST_SCHEMA,
            },
            raidlevel: {
                type: ZfsRaidLevel,
            },
            ashift: {
                schema: ZFS_ASHIFT_SCHEMA,
                optional: true,
            },
            compression: {
                type: ZfsCompressionType,
                optional: true,
            },
            "add-datastore": {
                description: "Configure a datastore using the zpool.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Create a new ZFS pool.
pub fn create_zpool(
    name: String,
    devices: String,
    raidlevel: ZfsRaidLevel,
    compression: Option<String>,
    ashift: Option<usize>,
    add_datastore: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();

    let add_datastore = add_datastore.unwrap_or(false);

    let ashift = ashift.unwrap_or(12);

    let devices_text = devices.clone();
    let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
    let devices: Vec<String> = devices.as_array().unwrap().iter()
        .map(|v| v.as_str().unwrap().to_string()).collect();

    let disk_map = crate::tools::disks::get_disks(None, true)?;
    for disk in devices.iter() {
        match disk_map.get(disk) {
            Some(info) => {
                if info.used != DiskUsageType::Unused {
                    bail!("disk '{}' is already in use.", disk);
                }
            }
            None => {
                bail!("no such disk '{}'", disk);
            }
        }
    }

    let min_disks = match raidlevel {
        ZfsRaidLevel::Single => 1,
        ZfsRaidLevel::Mirror => 2,
        ZfsRaidLevel::Raid10 => 4,
        ZfsRaidLevel::RaidZ => 3,
        ZfsRaidLevel::RaidZ2 => 4,
        ZfsRaidLevel::RaidZ3 => 5,
    };

    // Sanity checks
    if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
        bail!("Raid10 needs an even number of disks.");
    }

    if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
        bail!("Please give only one disk for single disk mode.");
    }

    if devices.len() < min_disks {
        bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
    }

    // check if the default path does exist already and bail if it does
    // otherwise we get an error on mounting
    let mut default_path = std::path::PathBuf::from("/");
    default_path.push(&name);

    match std::fs::metadata(&default_path) {
        Err(_) => {}, // path does not exist
        Ok(_) => {
            bail!("path {:?} already exists", default_path);
        }
    }

    let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        {
            worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));


            let mut command = std::process::Command::new("zpool");
            command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);

            match raidlevel {
                ZfsRaidLevel::Single => {
                    command.arg(&devices[0]);
                }
                ZfsRaidLevel::Mirror => {
                    command.arg("mirror");
                    command.args(devices);
                }
                ZfsRaidLevel::Raid10 => {
                    devices.chunks(2).for_each(|pair| {
                        command.arg("mirror");
                        command.args(pair);
                    });
                }
                ZfsRaidLevel::RaidZ => {
                    command.arg("raidz");
                    command.args(devices);
                }
                ZfsRaidLevel::RaidZ2 => {
                    command.arg("raidz2");
                    command.args(devices);
                }
                ZfsRaidLevel::RaidZ3 => {
                    command.arg("raidz3");
                    command.args(devices);
                }
            }

            worker.log(format!("# {:?}", command));

            let output = crate::tools::run_command(command, None)?;
            worker.log(output);

            if let Some(compression) = compression {
                let mut command = std::process::Command::new("zfs");
                command.args(&["set", &format!("compression={}", compression), &name]);
                worker.log(format!("# {:?}", command));
                let output = crate::tools::run_command(command, None)?;
                worker.log(output);
            }

            if add_datastore {
                let mount_point = format!("/{}", name);
                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
            }

            Ok(())
        })?;

    Ok(upid_str)
}

pub const POOL_ROUTER: Router = Router::new()
    .get(&API_METHOD_ZPOOL_DETAILS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_ZPOOLS)
    .post(&API_METHOD_CREATE_ZPOOL)
    .match_all("name", &POOL_ROUTER);
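For orientation, a sketch of what the worker above assembles for a hypothetical RaidZ pool named "tank" on three disks with the default ashift (pool and disk names are placeholders):

    // Sketch only: mirrors the RaidZ arm of the match above.
    let mut command = std::process::Command::new("zpool");
    command.args(&["create", "-o", "ashift=12", "tank"]);
    command.arg("raidz");
    command.args(&["sda", "sdb", "sdc"]);
    // roughly: zpool create -o ashift=12 tank raidz sda sdb sdc
    // followed by `zfs set compression=<type> tank` when a compression type is given.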
@@ -94,7 +94,7 @@ fn get_journal(

     let mut lines: Vec<String> = vec![];

-    let mut child = Command::new("/usr/bin/mini-journalreader")
+    let mut child = Command::new("mini-journalreader")
         .args(&args)
         .stdout(Stdio::piped())
         .spawn()?;
@@ -1,9 +1,47 @@
 use anyhow::Error;
-use serde_json::Value;
+use serde_json::{Value, json};

 use proxmox::api::{api, Router};

 use crate::api2::types::*;
+use crate::tools::epoch_now_f64;
+use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
+
+pub fn create_value_from_rrd(
+    basedir: &str,
+    list: &[&str],
+    timeframe: RRDTimeFrameResolution,
+    cf: RRDMode,
+) -> Result<Value, Error> {
+
+    let mut result = Vec::new();
+    let now = epoch_now_f64()?;
+
+    for name in list {
+        let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
+            Some(result) => result,
+            None => continue,
+        };
+
+        let mut t = start;
+        for index in 0..RRD_DATA_ENTRIES {
+            if result.len() <= index {
+                if let Some(value) = list[index] {
+                    result.push(json!({ "time": t, *name: value }));
+                } else {
+                    result.push(json!({ "time": t }));
+                }
+            } else {
+                if let Some(value) = list[index] {
+                    result[index][name] = value.into();
+                }
+            }
+            t += reso;
+        }
+    }
+
+    Ok(result.into())
+}
+
 #[api(
     input: {

@@ -27,7 +65,7 @@ fn get_node_stats(
     _param: Value,
 ) -> Result<Value, Error> {

-    crate::rrd::extract_data(
+    create_value_from_rrd(
         "host",
         &[
             "cpu", "iowait",
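Illustration (not output of the patch, just the expected shape under the loop above): create_value_from_rrd returns a JSON array with one object per RRD slot, keyed by the requested series names.

    // [
    //   { "time": 1590000000, "cpu": 0.04, "iowait": 0.001 },
    //   { "time": 1590000030, "cpu": 0.05 },
    //   ...
    // ]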
@@ -38,7 +38,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let mut child = Command::new("/bin/systemctl")
+    let mut child = Command::new("systemctl")
         .args(&["show", real_service_name])
         .stdout(Stdio::piped())
         .spawn()?;

@@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let status = Command::new("/bin/systemctl")
+    let status = Command::new("systemctl")
         .args(&[cmd, real_service_name])
         .status()?;
@@ -1,4 +1,5 @@
 use std::process::Command;
+use std::path::Path;

 use anyhow::{Error, format_err, bail};
 use serde_json::{json, Value};

@@ -60,6 +61,7 @@ fn get_usage(

     let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
+    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

     Ok(json!({
         "memory": {

@@ -68,6 +70,11 @@ fn get_usage(
             "free": meminfo.memfree,
         },
         "cpu": kstat.cpu,
+        "root": {
+            "total": disk_usage.total,
+            "used": disk_usage.used,
+            "free": disk_usage.avail,
+        }
     }))
 }

@@ -95,7 +102,7 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
         NodePowerCommand::Shutdown => "poweroff",
     };

-    let output = Command::new("/bin/systemctl")
+    let output = Command::new("systemctl")
         .arg(systemctl_command)
         .output()
         .map_err(|err| format_err!("failed to execute systemctl - {}", err))?;
@@ -27,7 +27,7 @@ fn dump_journal(
     let start = start.unwrap_or(0);
     let mut count: u64 = 0;

-    let mut child = Command::new("/bin/journalctl")
+    let mut child = Command::new("journalctl")
         .args(&args)
         .stdout(Stdio::piped())
         .spawn()?;
@@ -323,21 +323,9 @@ pub fn list_tasks(

     let mut count = 0;

-    for info in list.iter() {
+    for info in list {
         if !list_all && info.upid.username != username { continue; }

-        let mut entry = TaskListItem {
-            upid: info.upid_str.clone(),
-            node: "localhost".to_string(),
-            pid: info.upid.pid as i64,
-            pstart: info.upid.pstart,
-            starttime: info.upid.starttime,
-            worker_type: info.upid.worker_type.clone(),
-            worker_id: info.upid.worker_id.clone(),
-            user: info.upid.username.clone(),
-            endtime: None,
-            status: None,
-        };
-
         if let Some(username) = userfilter {
             if !info.upid.username.contains(username) { continue; }

@@ -367,9 +355,6 @@ pub fn list_tasks(
             if errors && state.1 == "OK" {
                 continue;
             }
-
-            entry.endtime = Some(state.0);
-            entry.status = Some(state.1.clone());
         }

         if (count as u64) < start {

@@ -379,7 +364,7 @@ pub fn list_tasks(
             count += 1;
         }

-        if (result.len() as u64) < limit { result.push(entry); };
+        if (result.len() as u64) < limit { result.push(info.into()); };
     }

     rpcenv["total"] = Value::from(count);
@@ -17,6 +17,7 @@ use crate::server::{WorkerTask, H2Service};
 use crate::tools;
 use crate::config::acl::PRIV_DATASTORE_READ;
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::api2::helpers;

 mod environment;
 use environment::*;

@@ -187,26 +188,9 @@ fn download_file(
     path.push(env.backup_dir.relative_path());
     path.push(&file_name);

-    let path2 = path.clone();
-    let path3 = path.clone();
+    env.log(format!("download {:?}", path.clone()));

-    let file = tokio::fs::File::open(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
-        .await?;
-
-    env.log(format!("download {:?}", path3));
-
-    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
-
-    let body = Body::wrap_stream(payload);
-
-    // fixme: set other headers ?
-    Ok(Response::builder()
-        .status(StatusCode::OK)
-        .header(header::CONTENT_TYPE, "application/octet-stream")
-        .body(body)
-        .unwrap())
+    helpers::create_download_response(path).await
     }.boxed()
 }
src/api2/status.rs (new file, 226 lines)
@@ -0,0 +1,226 @@
use proxmox::list_subdirs_api_method;

use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{
    api,
    ApiMethod,
    Permission,
    Router,
    RpcEnvironment,
    SubdirMap,
    UserInformation,
};

use crate::api2::types::{
    DATASTORE_SCHEMA,
    RRDMode,
    RRDTimeFrameResolution,
    TaskListItem
};

use crate::server;
use crate::backup::{DataStore};
use crate::config::datastore;
use crate::tools::epoch_now_f64;
use crate::tools::statistics::{linear_regression};
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::acl::{
    PRIV_SYS_AUDIT,
    PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP,
};

#[api(
    returns: {
        description: "Lists the Status of the Datastores.",
        type: Array,
        items: {
            description: "Status of a Datastore",
            type: Object,
            properties: {
                store: {
                    schema: DATASTORE_SCHEMA,
                },
                total: {
                    type: Integer,
                    description: "The Size of the underlying storage in bytes",
                },
                used: {
                    type: Integer,
                    description: "The used bytes of the underlying storage",
                },
                avail: {
                    type: Integer,
                    description: "The available bytes of the underlying storage",
                },
                history: {
                    type: Array,
                    description: "A list of usages of the past (last Month).",
                    items: {
                        type: Number,
                        description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
                    }
                },
                "estimated-full-date": {
                    type: Integer,
                    optional: true,
                    description: "Estimation of the UNIX epoch when the storage will be full.\
                        This is calculated via a simple Linear Regression (Least Squares)\
                        of RRD data of the last Month. Missing if there are not enough data points yet.\
                        If the estimate lies in the past, the usage is decreasing.",
                },
            },
        },
    },
)]
/// List Datastore usages and estimates
fn datastore_status(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, _)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
        if !allowed {
            continue;
        }

        let datastore = DataStore::lookup_datastore(&store)?;
        let status = crate::tools::disks::disk_usage(&datastore.base_path())?;

        let mut entry = json!({
            "store": store,
            "total": status.total,
            "used": status.used,
            "avail": status.avail,
        });

        let rrd_dir = format!("datastore/{}", store);
        let now = epoch_now_f64()?;
        let rrd_resolution = RRDTimeFrameResolution::Month;
        let rrd_mode = RRDMode::Average;

        let total_res = crate::rrd::extract_cached_data(
            &rrd_dir,
            "total",
            now,
            rrd_resolution,
            rrd_mode,
        );

        let used_res = crate::rrd::extract_cached_data(
            &rrd_dir,
            "used",
            now,
            rrd_resolution,
            rrd_mode,
        );

        match (total_res, used_res) {
            (Some((start, reso, total_list)), Some((_, _, used_list))) => {
                let mut usage_list: Vec<f64> = Vec::new();
                let mut time_list: Vec<u64> = Vec::new();
                let mut history = Vec::new();

                for (idx, used) in used_list.iter().enumerate() {
                    let total = if idx < total_list.len() {
                        total_list[idx]
                    } else {
                        None
                    };

                    match (total, used) {
                        (Some(total), Some(used)) if total != 0.0 => {
                            time_list.push(start + (idx as u64)*reso);
                            let usage = used/total;
                            usage_list.push(usage);
                            history.push(json!(usage));
                        },
                        _ => {
                            history.push(json!(null))
                        }
                    }
                }

                entry["history"] = history.into();

                // we skip the calculation for datastores with not enough data
                if usage_list.len() >= 7 {
                    if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
                        if b != 0.0 {
                            let estimate = (1.0 - a) / b;
                            entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
                        }
                    }
                }
            },
            _ => {},
        }

        list.push(entry);
    }

    Ok(list.into())
}

#[api(
    input: {
        properties: {
            since: {
                type: u64,
                description: "Only list tasks since this UNIX epoch.",
                optional: true,
            },
        },
    },
    returns: {
        description: "A list of tasks.",
        type: Array,
        items: { type: TaskListItem },
    },
    access: {
        description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
        permission: &Permission::Anybody,
    },
)]
/// List tasks.
pub fn list_tasks(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

    // TODO: replace with call that gets all task since 'since' epoch
    let list: Vec<TaskListItem> = server::read_task_list()?
        .into_iter()
        .map(TaskListItem::from)
        .filter(|entry| list_all || entry.user == username)
        .collect();

    Ok(list.into())
}

const SUBDIRS: SubdirMap = &[
    ("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
    ("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
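A note on the "estimated-full-date" value computed above, assuming linear_regression returns intercept and slope of usage over time:

    // Reading the call as fitting usage(t) ≈ a + b*t, the store is full when
    // a + b*t == 1.0, i.e. t = (1.0 - a) / b -- exactly the `estimate` stored above.
    // An estimate in the past means the fitted usage is decreasing.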
@@ -74,6 +74,8 @@ const_regex!{
     pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
+
+    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =

@@ -133,6 +135,8 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
 pub const CIDR_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&CIDR_REGEX);

+pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)

@@ -353,6 +357,11 @@ pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
     .max_length(64)
     .schema();

+pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
+    .format(&BLOCKDEVICE_NAME_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();
+
 // Complex type definitions

@@ -419,7 +428,7 @@ pub struct SnapshotListItem {
     pub backup_id: String,
     pub backup_time: i64,
     /// List of contained archive files.
-    pub files: Vec<String>,
+    pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,

@@ -494,6 +503,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
 /// Basic information about archive files inside a backup snapshot.
 pub struct BackupContent {
     pub filename: String,
+    /// Info if file is encrypted (or empty if we do not have that info)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub encrypted: Option<bool>,
     /// Archive size (from backup manifest).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,

@@ -590,6 +602,27 @@ pub struct TaskListItem {
     pub status: Option<String>,
 }

+impl From<crate::server::TaskListInfo> for TaskListItem {
+    fn from(info: crate::server::TaskListInfo) -> Self {
+        let (endtime, status) = info
+            .state
+            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+
+        TaskListItem {
+            upid: info.upid_str,
+            node: "localhost".to_string(),
+            pid: info.upid.pid as i64,
+            pstart: info.upid.pstart,
+            starttime: info.upid.starttime,
+            worker_type: info.upid.worker_type,
+            worker_id: info.upid.worker_id,
+            user: info.upid.username,
+            endtime,
+            status,
+        }
+    }
+}
+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
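The new BLOCKDEVICE_NAME_REGEX accepts kernel block device names such as sda, vdb or nvme0n1 (names, not /dev paths). The From impl is what lets the task list code above write the conversion in one step:

    // Sketch: converting a server::TaskListInfo into the API type, as used in list_tasks.
    let item: TaskListItem = info.into();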
@@ -10,6 +10,8 @@ use std::path::PathBuf;
 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
 use proxmox::try_block;

+use crate::tools::epoch_now_u64;
+
 fn compute_csrf_secret_digest(
     timestamp: i64,
     secret: &[u8],

@@ -29,8 +31,7 @@ pub fn assemble_csrf_prevention_token(
     username: &str,
 ) -> String {

-    let epoch = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs() as i64;
+    let epoch = epoch_now_u64().unwrap() as i64;

     let digest = compute_csrf_secret_digest(epoch, secret, username);

@@ -67,8 +68,7 @@ pub fn verify_csrf_prevention_token(
         bail!("invalid signature.");
     }

-    let now = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH)?.as_secs() as i64;
+    let now = epoch_now_u64()? as i64;

     let age = now - ttime;
     if age < min_age {
@@ -198,5 +198,11 @@ pub use prune::*;
 mod datastore;
 pub use datastore::*;

+mod verify;
+pub use verify::*;
+
 mod catalog_shell;
 pub use catalog_shell::*;
+
+mod async_index_reader;
+pub use async_index_reader::*;
src/backup/async_index_reader.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
use std::future::Future;
use std::task::{Poll, Context};
use std::pin::Pin;

use anyhow::Error;
use futures::future::FutureExt;
use futures::ready;
use tokio::io::AsyncRead;

use proxmox::sys::error::io_err_other;
use proxmox::io_format_err;

use super::IndexFile;
use super::read_chunk::AsyncReadChunk;

enum AsyncIndexReaderState<S> {
    NoData,
    WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
    HaveData(usize),
}

pub struct AsyncIndexReader<S, I: IndexFile> {
    store: Option<S>,
    index: I,
    read_buffer: Vec<u8>,
    current_chunk_idx: usize,
    current_chunk_digest: [u8; 32],
    state: AsyncIndexReaderState<S>,
}

// ok because the only public interfaces operates on &mut Self
unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}

impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
    pub fn new(index: I, store: S) -> Self {
        Self {
            store: Some(store),
            index,
            read_buffer: Vec::with_capacity(1024*1024),
            current_chunk_idx: 0,
            current_chunk_digest: [0u8; 32],
            state: AsyncIndexReaderState::NoData,
        }
    }
}

impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
    S: AsyncReadChunk + Unpin + Sync + 'static,
    I: IndexFile + Unpin
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<tokio::io::Result<usize>> {
        let this = Pin::get_mut(self);
        loop {
            match &mut this.state {
                AsyncIndexReaderState::NoData => {
                    if this.current_chunk_idx >= this.index.index_count() {
                        return Poll::Ready(Ok(0));
                    }

                    let digest = this
                        .index
                        .index_digest(this.current_chunk_idx)
                        .ok_or(io_format_err!("could not get digest"))?
                        .clone();

                    if digest == this.current_chunk_digest {
                        this.state = AsyncIndexReaderState::HaveData(0);
                        continue;
                    }

                    this.current_chunk_digest = digest;

                    let store = match this.store.take() {
                        Some(store) => store,
                        None => {
                            return Poll::Ready(Err(io_format_err!("could not find store")));
                        },
                    };

                    let future = async move {
                        store.read_chunk(&digest)
                            .await
                            .map(move |x| (store, x))
                    };

                    this.state = AsyncIndexReaderState::WaitForData(future.boxed());
                },
                AsyncIndexReaderState::WaitForData(ref mut future) => {
                    match ready!(future.as_mut().poll(cx)) {
                        Ok((store, mut chunk_data)) => {
                            this.read_buffer.clear();
                            this.read_buffer.append(&mut chunk_data);
                            this.state = AsyncIndexReaderState::HaveData(0);
                            this.store = Some(store);
                        },
                        Err(err) => {
                            return Poll::Ready(Err(io_err_other(err)));
                        },
                    };
                },
                AsyncIndexReaderState::HaveData(offset) => {
                    let offset = *offset;
                    let len = this.read_buffer.len();
                    let n = if len - offset < buf.len() {
                        len - offset
                    } else {
                        buf.len()
                    };

                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
                    if offset + n == len {
                        this.state = AsyncIndexReaderState::NoData;
                        this.current_chunk_idx += 1;
                    } else {
                        this.state = AsyncIndexReaderState::HaveData(offset + n);
                    }

                    return Poll::Ready(Ok(n));
                },
            }
        }
    }
}
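A rough usage sketch of the new reader (the surrounding function and its bounds are assumptions; only AsyncIndexReader itself is from the patch): wrap an index plus a chunk reader and consume it through the standard tokio AsyncRead machinery.

    use tokio::io::AsyncReadExt;

    // Sketch: read a whole fixed/dynamic index into memory via AsyncIndexReader.
    async fn copy_whole_image<S, I>(index: I, chunk_reader: S) -> Result<Vec<u8>, anyhow::Error>
    where
        S: AsyncReadChunk + Unpin + Sync + 'static,
        I: IndexFile + Unpin,
    {
        let mut reader = AsyncIndexReader::new(index, chunk_reader);
        let mut buf = Vec::new();
        reader.read_to_end(&mut buf).await?;
        Ok(buf)
    }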
@@ -59,17 +59,6 @@ impl BackupGroup {
         &self.backup_id
     }

-    pub fn parse(path: &str) -> Result<Self, Error> {
-
-        let cap = GROUP_PATH_REGEX.captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
-
-        Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().to_owned(),
-            backup_id: cap.get(2).unwrap().as_str().to_owned(),
-        })
-    }
-
     pub fn group_path(&self) -> PathBuf {

         let mut relative_path = PathBuf::new();
@@ -152,6 +141,31 @@ impl BackupGroup {
     }
 }

+impl std::fmt::Display for BackupGroup {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.backup_type();
+        let id = self.backup_id();
+        write!(f, "{}/{}", backup_type, id)
+    }
+}
+
+impl std::str::FromStr for BackupGroup {
+    type Err = Error;
+
+    /// Parse a backup group path
+    ///
+    /// This parses strings like `vm/100".
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = GROUP_PATH_REGEX.captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(Self {
+            backup_type: cap.get(1).unwrap().as_str().to_owned(),
+            backup_id: cap.get(2).unwrap().as_str().to_owned(),
+        })
+    }
+}
+
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snaphost.
@@ -188,16 +202,6 @@ impl BackupDir {
         self.backup_time
     }

-    pub fn parse(path: &str) -> Result<Self, Error> {
-
-        let cap = SNAPSHOT_PATH_REGEX.captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
-
-        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
-        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
-        Ok(BackupDir::from((group, backup_time.timestamp())))
-    }
-
     pub fn relative_path(&self) -> PathBuf {

         let mut relative_path = self.group.group_path();
@@ -212,6 +216,31 @@ impl BackupDir {
     }
 }

+impl std::str::FromStr for BackupDir {
+    type Err = Error;
+
+    /// Parse a snapshot path
+    ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z".
+    fn from_str(path: &str) -> Result<Self, Self::Err> {
+        let cap = SNAPSHOT_PATH_REGEX.captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
+        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
+        Ok(BackupDir::from((group, backup_time.timestamp())))
+    }
+}
+
+impl std::fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let backup_type = self.group.backup_type();
+        let id = self.group.backup_id();
+        let time = Self::backup_time_to_string(self.backup_time);
+        write!(f, "{}/{}/{}", backup_type, id, time)
+    }
+}
+
 impl From<(BackupGroup, i64)> for BackupDir {
     fn from((group, timestamp): (BackupGroup, i64)) -> Self {
         Self { group, backup_time: Utc.timestamp(timestamp, 0) }
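For orientation, here is a minimal, self-contained sketch of the Display/FromStr round-trip these impls enable. The struct and parsing below are simplified stand-ins for illustration only, not the repository's actual types or regexes:

use std::fmt;
use std::str::FromStr;

#[derive(Debug, PartialEq)]
struct Group { backup_type: String, backup_id: String }

impl fmt::Display for Group {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}", self.backup_type, self.backup_id)
    }
}

impl FromStr for Group {
    type Err = String;
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        // "<type>/<id>", both parts non-empty
        let mut parts = path.splitn(2, '/');
        match (parts.next(), parts.next()) {
            (Some(t), Some(id)) if !t.is_empty() && !id.is_empty() => Ok(Group {
                backup_type: t.to_owned(),
                backup_id: id.to_owned(),
            }),
            _ => Err(format!("unable to parse backup group path '{}'", path)),
        }
    }
}

fn main() {
    let group: Group = "vm/100".parse().unwrap();
    assert_eq!(group.to_string(), "vm/100"); // parse() and Display round-trip
    assert!("vm".parse::<Group>().is_err());
}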
@@ -1,23 +1,21 @@
-use anyhow::{bail, format_err, Error};
-use std::fmt;
-use std::ffi::{CStr, CString, OsStr};
-use std::os::unix::ffi::OsStrExt;
-use std::io::{Read, Write, Seek, SeekFrom};
 use std::convert::TryFrom;
+use std::ffi::{CStr, CString, OsStr};
+use std::fmt;
+use std::io::{Read, Write, Seek, SeekFrom};
+use std::os::unix::ffi::OsStrExt;
+
+use anyhow::{bail, format_err, Error};
 use chrono::offset::{TimeZone, Local};
+
+use pathpatterns::{MatchList, MatchType};
 use proxmox::tools::io::ReadExt;
-use proxmox::sys::error::io_err_other;
-
-use crate::pxar::catalog::BackupCatalogWriter;
-use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
 use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
-use crate::tools::runtime::block_on;
+use crate::pxar::catalog::BackupCatalogWriter;

 #[repr(u8)]
 #[derive(Copy,Clone,PartialEq)]
-enum CatalogEntryType {
+pub(crate) enum CatalogEntryType {
     Directory = b'd',
     File = b'f',
     Symlink = b'l',
@@ -46,6 +44,21 @@ impl TryFrom<u8> for CatalogEntryType {
     }
 }

+impl From<&DirEntryAttribute> for CatalogEntryType {
+    fn from(value: &DirEntryAttribute) -> Self {
+        match value {
+            DirEntryAttribute::Directory { .. } => CatalogEntryType::Directory,
+            DirEntryAttribute::File { .. } => CatalogEntryType::File,
+            DirEntryAttribute::Symlink => CatalogEntryType::Symlink,
+            DirEntryAttribute::Hardlink => CatalogEntryType::Hardlink,
+            DirEntryAttribute::BlockDevice => CatalogEntryType::BlockDevice,
+            DirEntryAttribute::CharDevice => CatalogEntryType::CharDevice,
+            DirEntryAttribute::Fifo => CatalogEntryType::Fifo,
+            DirEntryAttribute::Socket => CatalogEntryType::Socket,
+        }
+    }
+}
+
 impl fmt::Display for CatalogEntryType {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{}", char::from(*self as u8))
@@ -63,7 +76,7 @@ pub struct DirEntry {
 }

 /// Used to specific additional attributes inside DirEntry
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
 pub enum DirEntryAttribute {
     Directory { start: u64 },
     File { size: u64, mtime: u64 },
@@ -106,6 +119,23 @@ impl DirEntry {
         }
     }

+    /// Get file mode bits for this entry to be used with the `MatchList` api.
+    pub fn get_file_mode(&self) -> Option<u32> {
+        Some(
+            match self.attr {
+                DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
+                DirEntryAttribute::File { .. } => pxar::mode::IFREG,
+                DirEntryAttribute::Symlink => pxar::mode::IFLNK,
+                DirEntryAttribute::Hardlink => return None,
+                DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
+                DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
+                DirEntryAttribute::Fifo => pxar::mode::IFIFO,
+                DirEntryAttribute::Socket => pxar::mode::IFSOCK,
+            }
+            as u32
+        )
+    }
+
     /// Check if DirEntry is a directory
     pub fn is_directory(&self) -> bool {
         match self.attr {
@@ -383,32 +413,6 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
     }
 }

-// fixme: move to somehere else?
-/// Implement Write to tokio mpsc channel Sender
-pub struct SenderWriter(tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>);
-
-impl SenderWriter {
-    pub fn new(sender: tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>) -> Self {
-        Self(sender)
-    }
-}
-
-impl Write for SenderWriter {
-    fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
-        block_on(async move {
-            self.0
-                .send(Ok(buf.to_vec()))
-                .await
-                .map_err(io_err_other)
-                .and(Ok(buf.len()))
-        })
-    }
-
-    fn flush(&mut self) -> Result<(), std::io::Error> {
-        Ok(())
-    }
-}
-
 /// Read Catalog files
 pub struct CatalogReader<R> {
     reader: R,
@@ -476,7 +480,7 @@ impl <R: Read + Seek> CatalogReader<R> {
         &mut self,
         parent: &DirEntry,
         filename: &[u8],
-    ) -> Result<DirEntry, Error> {
+    ) -> Result<Option<DirEntry>, Error> {

         let start = match parent.attr {
             DirEntryAttribute::Directory { start } => start,
@@ -496,10 +500,7 @@ impl <R: Read + Seek> CatalogReader<R> {
             Ok(false) // stop parsing
         })?;

-        match item {
-            None => bail!("no such file"),
-            Some(entry) => Ok(entry),
-        }
+        Ok(item)
     }

     /// Read the raw directory info block from current reader position.
@@ -532,7 +533,10 @@ impl <R: Read + Seek> CatalogReader<R> {
                     self.dump_dir(&path, pos)?;
                 }
                 CatalogEntryType::File => {
-                    let dt = Local.timestamp(mtime as i64, 0);
+                    let dt = Local
+                        .timestamp_opt(mtime as i64, 0)
+                        .single() // chrono docs say timestamp_opt can only be None or Single!
+                        .unwrap_or_else(|| Local.timestamp(0, 0));

                     println!(
                         "{} {:?} {} {}",
@@ -555,38 +559,30 @@ impl <R: Read + Seek> CatalogReader<R> {
     /// provided callback on them.
     pub fn find(
         &mut self,
-        mut entry: &mut Vec<DirEntry>,
-        pattern: &[MatchPatternSlice],
-        callback: &Box<fn(&[DirEntry])>,
+        parent: &DirEntry,
+        file_path: &mut Vec<u8>,
+        match_list: &impl MatchList, //&[MatchEntry],
+        callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
     ) -> Result<(), Error> {
-        let parent = entry.last().unwrap();
-        if !parent.is_directory() {
-            return Ok(())
-        }
-
+        let file_len = file_path.len();
         for e in self.read_dir(parent)? {
-            match MatchPatternSlice::match_filename_include(
-                &CString::new(e.name.clone())?,
-                e.is_directory(),
-                pattern,
-            )? {
-                (MatchType::Positive, _) => {
-                    entry.push(e);
-                    callback(&entry);
-                    let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
-                    let child_pattern = vec![pattern.as_slice()];
-                    self.find(&mut entry, &child_pattern, callback)?;
-                    entry.pop();
-                }
-                (MatchType::PartialPositive, child_pattern)
-                | (MatchType::PartialNegative, child_pattern) => {
-                    entry.push(e);
-                    self.find(&mut entry, &child_pattern, callback)?;
-                    entry.pop();
-                }
-                _ => {}
+            let is_dir = e.is_directory();
+            file_path.truncate(file_len);
+            if !e.name.starts_with(b"/") {
+                file_path.reserve(e.name.len() + 1);
+                file_path.push(b'/');
+            }
+            file_path.extend(&e.name);
+            match match_list.matches(&file_path, e.get_file_mode()) {
+                Some(MatchType::Exclude) => continue,
+                Some(MatchType::Include) => callback(&file_path)?,
+                None => (),
+            }
+            if is_dir {
+                self.find(&e, file_path, match_list, callback)?;
             }
         }
+        file_path.truncate(file_len);

         Ok(())
     }
File diff suppressed because it is too large
@@ -429,6 +429,10 @@ impl ChunkStore {
         full_path
     }

+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.base.clone()
     }
@@ -167,7 +167,7 @@ impl DataBlob {
     }

     /// Decode blob data
-    pub fn decode(self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
+    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {

         let magic = self.magic();

@@ -311,7 +311,9 @@ impl DataBlob {
     /// Verify digest and data length for unencrypted chunks.
     ///
     /// To do that, we need to decompress data first. Please note that
-    /// this is not possible for encrypted chunks.
+    /// this is not possible for encrypted chunks. This function simply return Ok
+    /// for encrypted chunks.
+    /// Note: This does not call verify_crc
     pub fn verify_unencrypted(
         &self,
         expected_chunk_size: usize,
@@ -320,22 +322,18 @@ impl DataBlob {

         let magic = self.magic();

-        let verify_raw_data = |data: &[u8]| {
-            if expected_chunk_size != data.len() {
-                bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
-            }
-            let digest = openssl::sha::sha256(data);
-            if &digest != expected_digest {
-                bail!("detected chunk with wrong digest.");
-            }
-            Ok(())
-        };
-
-        if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
-            let data = zstd::block::decompress(&self.raw_data[12..], 16*1024*1024)?;
-            verify_raw_data(&data)?;
-        } else if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
-            verify_raw_data(&self.raw_data[12..])?;
+        if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+            return Ok(());
+        }
+
+        let data = self.decode(None)?;
+
+        if expected_chunk_size != data.len() {
+            bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
+        }
+        let digest = openssl::sha::sha256(&data);
+        if &digest != expected_digest {
+            bail!("detected chunk with wrong digest.");
         }

         Ok(())
@@ -19,6 +19,10 @@ pub struct DataBlobReader<R: Read> {
     state: BlobReaderState<R>,
 }

+// zstd_safe::DCtx is not sync but we are, since
+// the only public interface is on mutable reference
+unsafe impl<R: Read> Sync for DataBlobReader<R> {}
+
 impl <R: Read> DataBlobReader<R> {

     pub fn new(mut reader: R, config: Option<Arc<CryptConfig>>) -> Result<Self, Error> {
@@ -2,6 +2,7 @@ use std::collections::{HashSet, HashMap};
 use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
+use std::convert::TryFrom;

 use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
@@ -134,6 +135,10 @@ impl DataStore {
         Ok(out)
     }

+    pub fn name(&self) -> &str {
+        self.chunk_store.name()
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.chunk_store.base_path()
     }
@@ -470,4 +475,28 @@ impl DataStore {
     ) -> Result<(bool, u64), Error> {
         self.chunk_store.insert_chunk(chunk, digest)
     }
+
+    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
+        let blob = self.chunk_store.read_chunk(digest)?;
+        blob.verify_crc()?;
+        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
+        Ok(())
+    }
+
+    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
+        let mut path = self.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(filename);
+
+        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
+        let raw_size = raw_data.len() as u64;
+        let blob = DataBlob::from_raw(raw_data)?;
+        Ok((blob, raw_size))
+    }
+
+    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
+        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        let manifest = BackupManifest::try_from(blob)?;
+        Ok((manifest, raw_size))
+    }
 }
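The per-chunk check added above (verify_crc plus verify_unencrypted) boils down to comparing the stored data against the expected length and SHA-256 digest. A simplified, self-contained sketch of that core check (the function name and signature are illustrative, not the DataStore API):

use anyhow::{bail, Error};

fn verify_chunk(data: &[u8], expected_size: u64, expected_digest: &[u8; 32]) -> Result<(), Error> {
    if data.len() as u64 != expected_size {
        bail!("detected chunk with wrong length ({} != {})", expected_size, data.len());
    }
    let digest = openssl::sha::sha256(data);
    if &digest != expected_digest {
        bail!("detected chunk with wrong digest.");
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    let data = b"hello chunk";
    let digest = openssl::sha::sha256(data);
    verify_chunk(data, data.len() as u64, &digest)?;
    println!("chunk ok");
    Ok(())
}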
@@ -1,23 +1,28 @@
-use std::convert::TryInto;
 use std::fs::File;
-use std::io::{BufWriter, Seek, SeekFrom, Write};
+use std::io::{self, BufWriter, Seek, SeekFrom, Write};
+use std::ops::Range;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
+use std::task::Context;
+use std::pin::Pin;

 use anyhow::{bail, format_err, Error};

 use proxmox::tools::io::ReadExt;
 use proxmox::tools::uuid::Uuid;
 use proxmox::tools::vec;
+use proxmox::tools::mmap::Mmap;
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use super::chunk_stat::ChunkStat;
 use super::chunk_store::ChunkStore;
+use super::index::ChunkReadInfo;
 use super::read_chunk::ReadChunk;
 use super::Chunker;
 use super::IndexFile;
 use super::{DataBlob, DataChunkBuilder};
-use crate::tools;
+use crate::tools::{self, epoch_now_u64};

 /// Header format definition for dynamic index files (`.dixd`)
 #[repr(C)]
@@ -36,34 +41,34 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
 //     pub data: DynamicIndexHeaderData,
 // }

+#[derive(Clone, Debug)]
+#[repr(C)]
+pub struct DynamicEntry {
+    end_le: u64,
+    digest: [u8; 32],
+}
+
+impl DynamicEntry {
+    #[inline]
+    pub fn end(&self) -> u64 {
+        u64::from_le(self.end_le)
+    }
+}
+
 pub struct DynamicIndexReader {
     _file: File,
     pub size: usize,
-    index: *const u8,
-    index_entries: usize,
+    index: Mmap<DynamicEntry>,
     pub uuid: [u8; 16],
     pub ctime: u64,
     pub index_csum: [u8; 32],
 }

-// `index` is mmap()ed which cannot be thread-local so should be sendable
-// FIXME: Introduce an mmap wrapper type for this?
-unsafe impl Send for DynamicIndexReader {}
-unsafe impl Sync for DynamicIndexReader {}
-
-impl Drop for DynamicIndexReader {
-    fn drop(&mut self) {
-        if let Err(err) = self.unmap() {
-            eprintln!("Unable to unmap dynamic index - {}", err);
-        }
-    }
-}
-
 impl DynamicIndexReader {
     pub fn open(path: &Path) -> Result<Self, Error> {
         File::open(path)
             .map_err(Error::from)
-            .and_then(|file| Self::new(file))
+            .and_then(Self::new)
             .map_err(|err| format_err!("Unable to open dynamic index {:?} - {}", path, err))
     }

@@ -74,6 +79,7 @@ impl DynamicIndexReader {
             bail!("unable to get shared lock - {}", err);
         }

+        // FIXME: This is NOT OUR job! Check the callers of this method and remove this!
         file.seek(SeekFrom::Start(0))?;

         let header_size = std::mem::size_of::<DynamicIndexHeader>();
@@ -93,123 +99,49 @@ impl DynamicIndexReader {
         let size = stat.st_size as usize;

         let index_size = size - header_size;
-        if (index_size % 40) != 0 {
+        let index_count = index_size / 40;
+        if index_count * 40 != index_size {
             bail!("got unexpected file size");
         }

-        let data = unsafe {
-            nix::sys::mman::mmap(
-                std::ptr::null_mut(),
-                index_size,
+        let index = unsafe {
+            Mmap::map_fd(
+                rawfd,
+                header_size as u64,
+                index_count,
                 nix::sys::mman::ProtFlags::PROT_READ,
                 nix::sys::mman::MapFlags::MAP_PRIVATE,
-                rawfd,
-                header_size as i64,
-            )
-        }? as *const u8;
+            )?
+        };

         Ok(Self {
             _file: file,
             size,
-            index: data,
-            index_entries: index_size / 40,
+            index,
             ctime,
             uuid: header.uuid,
             index_csum: header.index_csum,
         })
     }

-    fn unmap(&mut self) -> Result<(), Error> {
-        if self.index == std::ptr::null_mut() {
-            return Ok(());
-        }
-
-        if let Err(err) = unsafe {
-            nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, self.index_entries * 40)
-        } {
-            bail!("unmap dynamic index failed - {}", err);
-        }
-
-        self.index = std::ptr::null_mut();
-
-        Ok(())
-    }
-
-    #[allow(clippy::cast_ptr_alignment)]
-    pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
-        if pos >= self.index_entries {
-            bail!("chunk index out of range");
-        }
-        let start = if pos == 0 {
-            0
-        } else {
-            unsafe { *(self.index.add((pos - 1) * 40) as *const u64) }
-        };
-
-        let end = unsafe { *(self.index.add(pos * 40) as *const u64) };
-
-        let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-        unsafe {
-            std::ptr::copy_nonoverlapping(
-                self.index.add(pos * 40 + 8),
-                (*digest.as_mut_ptr()).as_mut_ptr(),
-                32,
-            );
-        }
-
-        Ok((start, end, unsafe { digest.assume_init() }))
-    }
-
     #[inline]
     #[allow(clippy::cast_ptr_alignment)]
     fn chunk_end(&self, pos: usize) -> u64 {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
             panic!("chunk index out of range");
         }
-        unsafe { *(self.index.add(pos * 40) as *const u64) }
+        self.index[pos].end()
     }

     #[inline]
     fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
             panic!("chunk index out of range");
         }
-        let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 40 + 8), 32) };
-        slice.try_into().unwrap()
+        &self.index[pos].digest
     }

-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        let mut chunk_end = 0;
-        for pos in 0..self.index_entries {
-            chunk_end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            csum.update(&chunk_end.to_le_bytes());
-            csum.update(digest);
-        }
-        let csum = csum.finish();
-
-        (csum, chunk_end)
-    }
-
-    /*
-    pub fn dump_pxar(&self, mut writer: Box<dyn Write>) -> Result<(), Error> {
-
-        for pos in 0..self.index_entries {
-            let _end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            //println!("Dump {:08x}", end );
-            let chunk = self.store.read_chunk(digest)?;
-            // fimxe: handle encrypted chunks
-            let data = chunk.decode(None)?;
-            writer.write_all(&data)?;
-        }
-
-        Ok(())
-    }
-    */
-
+    // TODO: can we use std::slice::binary_search with Mmap now?
     fn binary_search(
         &self,
         start_idx: usize,
@@ -238,11 +170,11 @@ impl DynamicIndexReader {

 impl IndexFile for DynamicIndexReader {
     fn index_count(&self) -> usize {
-        self.index_entries
+        self.index.len()
     }

     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]> {
-        if pos >= self.index_entries {
+        if pos >= self.index.len() {
             None
         } else {
             Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
@@ -250,12 +182,59 @@ impl IndexFile for DynamicIndexReader {
     }

     fn index_bytes(&self) -> u64 {
-        if self.index_entries == 0 {
+        if self.index.is_empty() {
             0
         } else {
-            self.chunk_end((self.index_entries - 1) as usize)
+            self.chunk_end(self.index.len() - 1)
         }
     }
+
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&chunk_end.to_le_bytes());
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+        (csum, chunk_end)
+    }
+
+    #[allow(clippy::cast_ptr_alignment)]
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index.len() {
+            return None;
+        }
+        let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };
+
+        let end = self.index[pos].end();
+
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: self.index[pos].digest.clone(),
+        })
+    }
+}
+
+struct CachedChunk {
+    range: Range<u64>,
+    data: Vec<u8>,
+}
+
+impl CachedChunk {
+    /// Perform sanity checks on the range and data size:
+    pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
+        if data.len() as u64 != range.end - range.start {
+            bail!(
+                "read chunk with wrong size ({} != {})",
+                data.len(),
+                range.end - range.start,
+            );
+        }
+        Ok(Self { range, data })
+    }
 }

 pub struct BufferedDynamicReader<S> {
@@ -266,7 +245,7 @@ pub struct BufferedDynamicReader<S> {
     buffered_chunk_idx: usize,
     buffered_chunk_start: u64,
     read_offset: u64,
-    lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
+    lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
 }

 struct ChunkCacher<'a, S> {
@@ -274,16 +253,21 @@ struct ChunkCacher<'a, S> {
     index: &'a DynamicIndexReader,
 }

-impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
-    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
-        let (start, end, digest) = self.index.chunk_info(index)?;
-        self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
+impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
+    fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
+        let info = match self.index.chunk_info(index) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };
+        let range = info.range;
+        let data = self.store.read_chunk(&info.digest)?;
+        CachedChunk::new(range, data).map(Some)
     }
 }

 impl<S: ReadChunk> BufferedDynamicReader<S> {
     pub fn new(index: DynamicIndexReader, store: S) -> Self {
-        let archive_size = index.chunk_end(index.index_entries - 1);
+        let archive_size = index.index_bytes();
         Self {
             store,
             index,
@@ -301,7 +285,8 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
     }

     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-        let (start, end, data) = self.lru_cache.access(
+        //let (start, end, data) = self.lru_cache.access(
+        let cached_chunk = self.lru_cache.access(
             idx,
             &mut ChunkCacher {
                 store: &mut self.store,
@@ -309,21 +294,13 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
             },
         )?.ok_or_else(|| format_err!("chunk not found by cacher"))?;

-        if (*end - *start) != data.len() as u64 {
-            bail!(
-                "read chunk with wrong size ({} != {}",
-                (*end - *start),
-                data.len()
-            );
-        }
-
         // fixme: avoid copy
         self.read_buffer.clear();
-        self.read_buffer.extend_from_slice(&data);
+        self.read_buffer.extend_from_slice(&cached_chunk.data);

         self.buffered_chunk_idx = idx;

-        self.buffered_chunk_start = *start;
+        self.buffered_chunk_start = cached_chunk.range.start;
         //println!("BUFFER {} {}", self.buffered_chunk_start, end);
         Ok(())
     }
@@ -340,7 +317,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {

         // optimization for sequential read
         if buffer_len > 0
-            && ((self.buffered_chunk_idx + 1) < index.index_entries)
+            && ((self.buffered_chunk_idx + 1) < index.index.len())
             && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
         {
             let next_idx = self.buffered_chunk_idx + 1;
@@ -356,7 +333,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
             || (offset < self.buffered_chunk_start)
             || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
         {
-            let end_idx = index.index_entries - 1;
+            let end_idx = index.index.len() - 1;
             let end = index.chunk_end(end_idx);
             let idx = index.binary_search(0, 0, end_idx, end, offset)?;
             self.buffer_chunk(idx)?;
@@ -383,9 +360,7 @@ impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
             data.len()
         };

-        unsafe {
-            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
-        }
+        buf[0..n].copy_from_slice(&data[0..n]);

         self.read_offset += n as u64;

@@ -417,6 +392,49 @@ impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
     }
 }

+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+#[derive(Clone)]
+pub struct LocalDynamicReadAt<R: ReadChunk> {
+    inner: Arc<Mutex<BufferedDynamicReader<R>>>,
+}
+
+impl<R: ReadChunk> LocalDynamicReadAt<R> {
+    pub fn new(inner: BufferedDynamicReader<R>) -> Self {
+        Self {
+            inner: Arc::new(Mutex::new(inner)),
+        }
+    }
+}
+
+impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
+    fn start_read_at<'a>(
+        self: Pin<&'a Self>,
+        _cx: &mut Context,
+        buf: &'a mut [u8],
+        offset: u64,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        use std::io::Read;
+        MaybeReady::Ready(tokio::task::block_in_place(move || {
+            let mut reader = self.inner.lock().unwrap();
+            reader.seek(SeekFrom::Start(offset))?;
+            Ok(reader.read(buf)?)
+        }))
+    }
+
+    fn poll_complete<'a>(
+        self: Pin<&'a Self>,
+        _op: ReadAtOperation<'a>,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        panic!("LocalDynamicReadAt::start_read_at returned Pending");
+    }
+}
+
 /// Create dynamic index files (`.dixd`)
 pub struct DynamicIndexWriter {
     store: Arc<ChunkStore>,
@@ -460,9 +478,7 @@ impl DynamicIndexWriter {
             panic!("got unexpected header size");
         }

-        let ctime = std::time::SystemTime::now()
-            .duration_since(std::time::SystemTime::UNIX_EPOCH)?
-            .as_secs();
+        let ctime = epoch_now_u64()?;

         let uuid = Uuid::generate();

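The ChunkCacher/LruCache pair above follows a fetch-on-miss pattern: the cache calls back into a "cacher" only when the requested entry is not already buffered. A self-contained sketch of that pattern, with all names and the plain HashMap backing invented purely for illustration:

use std::collections::HashMap;

trait Cacher<K, V> {
    fn fetch(&mut self, key: K) -> Result<Option<V>, String>;
}

struct SquareCacher;

impl Cacher<u64, u64> for SquareCacher {
    fn fetch(&mut self, key: u64) -> Result<Option<u64>, String> {
        // Pretend this is the expensive chunk read.
        Ok(Some(key * key))
    }
}

struct Cache<K, V> {
    map: HashMap<K, V>,
}

impl<K: std::hash::Hash + Eq + Copy, V: Clone> Cache<K, V> {
    fn new() -> Self {
        Self { map: HashMap::new() }
    }

    /// Return the cached value, asking the cacher to fill the slot on a miss.
    fn access(&mut self, key: K, cacher: &mut impl Cacher<K, V>) -> Result<Option<V>, String> {
        if !self.map.contains_key(&key) {
            if let Some(value) = cacher.fetch(key)? {
                self.map.insert(key, value);
            }
        }
        Ok(self.map.get(&key).cloned())
    }
}

fn main() {
    let mut cache = Cache::new();
    let mut cacher = SquareCacher;
    assert_eq!(cache.access(7, &mut cacher).unwrap(), Some(49));
    assert_eq!(cache.access(7, &mut cacher).unwrap(), Some(49)); // second hit served from cache
}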
@@ -1,11 +1,10 @@
 use anyhow::{bail, format_err, Error};
-use std::convert::TryInto;
 use std::io::{Seek, SeekFrom};

 use super::chunk_stat::*;
 use super::chunk_store::*;
-use super::IndexFile;
-use crate::tools;
+use super::{IndexFile, ChunkReadInfo};
+use crate::tools::{self, epoch_now_u64};

 use chrono::{Local, TimeZone};
 use std::fs::File;
@@ -147,38 +146,6 @@ impl FixedIndexReader {
         Ok(())
     }

-    pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
-        if pos >= self.index_length {
-            bail!("chunk index out of range");
-        }
-        let start = (pos * self.chunk_size) as u64;
-        let mut end = start + self.chunk_size as u64;
-
-        if end > self.size {
-            end = self.size;
-        }
-
-        let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-        unsafe {
-            std::ptr::copy_nonoverlapping(
-                self.index.add(pos * 32),
-                (*digest.as_mut_ptr()).as_mut_ptr(),
-                32,
-            );
-        }
-
-        Ok((start, end, unsafe { digest.assume_init() }))
-    }
-
-    #[inline]
-    fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
-        if pos >= self.index_length {
-            panic!("chunk index out of range");
-        }
-        let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
-        slice.try_into().unwrap()
-    }
-
     #[inline]
     fn chunk_end(&self, pos: usize) -> u64 {
         if pos >= self.index_length {
@@ -193,20 +160,6 @@ impl FixedIndexReader {
         }
     }

-    /// Compute checksum and data size
-    pub fn compute_csum(&self) -> ([u8; 32], u64) {
-        let mut csum = openssl::sha::Sha256::new();
-        let mut chunk_end = 0;
-        for pos in 0..self.index_length {
-            chunk_end = self.chunk_end(pos);
-            let digest = self.chunk_digest(pos);
-            csum.update(digest);
-        }
-        let csum = csum.finish();
-
-        (csum, chunk_end)
-    }
-
     pub fn print_info(&self) {
         println!("Size: {}", self.size);
         println!("ChunkSize: {}", self.chunk_size);
@@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
     fn index_bytes(&self) -> u64 {
         self.size
     }
+
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
+        if pos >= self.index_length {
+            return None;
+        }
+
+        let start = (pos * self.chunk_size) as u64;
+        let mut end = start + self.chunk_size as u64;
+
+        if end > self.size {
+            end = self.size;
+        }
+
+        let digest = self.index_digest(pos).unwrap();
+        Some(ChunkReadInfo {
+            range: start..end,
+            digest: *digest,
+        })
+    }
+
+    fn compute_csum(&self) -> ([u8; 32], u64) {
+        let mut csum = openssl::sha::Sha256::new();
+        let mut chunk_end = 0;
+        for pos in 0..self.index_count() {
+            let info = self.chunk_info(pos).unwrap();
+            chunk_end = info.range.end;
+            csum.update(&info.digest);
+        }
+        let csum = csum.finish();
+
+        (csum, chunk_end)
+    }
 }

 pub struct FixedIndexWriter {
@@ -290,9 +275,7 @@ impl FixedIndexWriter {
             panic!("got unexpected header size");
         }

-        let ctime = std::time::SystemTime::now()
-            .duration_since(std::time::SystemTime::UNIX_EPOCH)?
-            .as_secs();
+        let ctime = epoch_now_u64()?;

         let uuid = Uuid::generate();

@@ -469,6 +452,18 @@ impl FixedIndexWriter {

         Ok(())
     }
+
+    pub fn clone_data_from(&mut self, reader: &FixedIndexReader) -> Result<(), Error> {
+        if self.index_length != reader.index_count() {
+            bail!("clone_data_from failed - index sizes not equal");
+        }
+
+        for i in 0..self.index_length {
+            self.add_digest(i, reader.index_digest(i).unwrap())?;
+        }
+
+        Ok(())
+    }
 }

 pub struct BufferedFixedReader<S> {
@@ -501,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {

     fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
         let index = &self.index;
-        let (start, end, digest) = index.chunk_info(idx)?;
+        let info = match index.chunk_info(idx) {
+            Some(info) => info,
+            None => bail!("chunk index out of range"),
+        };

         // fixme: avoid copy
-        let data = self.store.read_chunk(&digest)?;
-        if (end - start) != data.len() as u64 {
-            bail!(
-                "read chunk with wrong size ({} != {}",
-                (end - start),
-                data.len()
-            );
+        let data = self.store.read_chunk(&info.digest)?;
+        let size = info.range.end - info.range.start;
+        if size != data.len() as u64 {
+            bail!("read chunk with wrong size ({} != {}", size, data.len());
         }

         self.read_buffer.clear();
@@ -520,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {

         self.buffered_chunk_idx = idx;

-        self.buffered_chunk_start = start as u64;
-        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
+        self.buffered_chunk_start = info.range.start as u64;
         Ok(())
     }
 }
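The fixed-index chunk_info above uses simple arithmetic: every chunk is `chunk_size` bytes except possibly the last one, which is clamped to the total image size. A standalone sketch of that geometry (all values below are made up for illustration):

fn fixed_chunk_range(pos: u64, chunk_size: u64, total_size: u64) -> Option<std::ops::Range<u64>> {
    let start = pos * chunk_size;
    if start >= total_size {
        return None; // past the end of the index
    }
    let end = (start + chunk_size).min(total_size); // clamp the trailing chunk
    Some(start..end)
}

fn main() {
    let (chunk_size, total) = (4096, 10_000);
    assert_eq!(fixed_chunk_range(0, chunk_size, total), Some(0..4096));
    assert_eq!(fixed_chunk_range(2, chunk_size, total), Some(8192..10_000)); // partial last chunk
    assert_eq!(fixed_chunk_range(3, chunk_size, total), None);
}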
@@ -1,10 +1,17 @@
 use std::collections::HashMap;
-use std::pin::Pin;
-use std::task::{Context, Poll};
+use std::ops::Range;

-use bytes::{Bytes, BytesMut};
-use anyhow::{format_err, Error};
-use futures::*;
+pub struct ChunkReadInfo {
+    pub range: Range<u64>,
+    pub digest: [u8; 32],
+}
+
+impl ChunkReadInfo {
+    #[inline]
+    pub fn size(&self) -> u64 {
+        self.range.end - self.range.start
+    }
+}

 /// Trait to get digest list from index files
 ///
@@ -13,6 +20,10 @@ pub trait IndexFile {
     fn index_count(&self) -> usize;
     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
     fn index_bytes(&self) -> u64;
+    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
+
+    /// Compute index checksum and size
+    fn compute_csum(&self) -> ([u8; 32], u64);

     /// Returns most often used chunks
     fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
@@ -46,111 +57,3 @@ pub trait IndexFile {
         map
     }
 }
-
-/// Encode digest list from an `IndexFile` into a binary stream
-///
-/// The reader simply returns a birary stream of 32 byte digest values.
-pub struct DigestListEncoder {
-    index: Box<dyn IndexFile + Send + Sync>,
-    pos: usize,
-    count: usize,
-}
-
-impl DigestListEncoder {
-
-    pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
-        let count = index.index_count();
-        Self { index, pos: 0, count }
-    }
-}
-
-impl std::io::Read for DigestListEncoder {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        if buf.len() < 32 {
-            panic!("read buffer too small");
-        }
-
-        if self.pos < self.count {
-            let mut written = 0;
-            loop {
-                let digest = self.index.index_digest(self.pos).unwrap();
-                buf[written..(written + 32)].copy_from_slice(digest);
-                self.pos += 1;
-                written += 32;
-                if self.pos >= self.count {
-                    break;
-                }
-                if (written + 32) >= buf.len() {
-                    break;
-                }
-            }
-            Ok(written)
-        } else {
-            Ok(0)
-        }
-    }
-}
-
-/// Decodes a Stream<Item=Bytes> into Stream<Item=<[u8;32]>
-///
-/// The reader simply returns a birary stream of 32 byte digest values.
-
-pub struct DigestListDecoder<S: Unpin> {
-    input: S,
-    buffer: BytesMut,
-}
-
-impl<S: Unpin> DigestListDecoder<S> {
-    pub fn new(input: S) -> Self {
-        Self { input, buffer: BytesMut::new() }
-    }
-}
-
-impl<S: Unpin> Unpin for DigestListDecoder<S> {}
-
-impl<S: Unpin, E> Stream for DigestListDecoder<S>
-where
-    S: Stream<Item=Result<Bytes, E>>,
-    E: Into<Error>,
-{
-    type Item = Result<[u8; 32], Error>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-        let this = self.get_mut();
-
-        loop {
-            if this.buffer.len() >= 32 {
-                let left = this.buffer.split_to(32);
-
-                let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
-                unsafe {
-                    (*digest.as_mut_ptr()).copy_from_slice(&left[..]);
-                    return Poll::Ready(Some(Ok(digest.assume_init())));
-                }
-            }
-
-            match Pin::new(&mut this.input).poll_next(cx) {
-                Poll::Pending => {
-                    return Poll::Pending;
-                }
-                Poll::Ready(Some(Err(err))) => {
-                    return Poll::Ready(Some(Err(err.into())));
-                }
-                Poll::Ready(Some(Ok(data))) => {
-                    this.buffer.extend_from_slice(&data);
-                    // continue
-                }
-                Poll::Ready(None) => {
-                    let rest = this.buffer.len();
-                    if rest == 0 {
-                        return Poll::Ready(None);
-                    }
-                    return Poll::Ready(Some(Err(format_err!(
-                        "got small digest ({} != 32).",
-                        rest,
-                    ))));
-                }
-            }
-        }
-    }
-}
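The new `compute_csum` trait method folds every chunk's end offset (little-endian) and digest into one SHA-256, as the dynamic-index implementation above does. A minimal sketch of that fold; the entry list here is invented purely for illustration:

fn index_csum(entries: &[(u64, [u8; 32])]) -> ([u8; 32], u64) {
    let mut csum = openssl::sha::Sha256::new();
    let mut chunk_end = 0;
    for (end, digest) in entries {
        chunk_end = *end;
        csum.update(&chunk_end.to_le_bytes()); // end offset, little-endian
        csum.update(digest);                   // 32-byte chunk digest
    }
    (csum.finish(), chunk_end)
}

fn main() {
    let entries = vec![(4096u64, [1u8; 32]), (10240, [2u8; 32])];
    let (csum, total) = index_csum(&entries);
    println!("index covers {} bytes, csum starts with {:02x?}", total, &csum[..4]);
}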
@@ -11,6 +11,7 @@ pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

 pub struct FileInfo {
     pub filename: String,
+    pub encrypted: Option<bool>,
     pub size: u64,
     pub csum: [u8; 32],
 }
@@ -48,9 +49,9 @@ impl BackupManifest {
         Self { files: Vec::new(), snapshot }
     }

-    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
         let _archive_type = archive_type(&filename)?; // check type
-        self.files.push(FileInfo { filename, size, csum });
+        self.files.push(FileInfo { filename, size, csum, encrypted });
         Ok(())
     }

@@ -90,11 +91,18 @@ impl BackupManifest {
             "backup-time": self.snapshot.backup_time().timestamp(),
             "files": self.files.iter()
                 .fold(Vec::new(), |mut acc, info| {
-                    acc.push(json!({
+                    let mut value = json!({
                         "filename": info.filename,
+                        "encrypted": info.encrypted,
                         "size": info.size,
                         "csum": proxmox::tools::digest_to_hex(&info.csum),
-                    }));
+                    });
+
+                    if let Some(encrypted) = info.encrypted {
+                        value["encrypted"] = encrypted.into();
+                    }
+
+                    acc.push(value);
                     acc
                 })
         })
@@ -134,7 +142,8 @@ impl TryFrom<Value> for BackupManifest {
             let csum = required_string_property(item, "csum")?;
             let csum = proxmox::tools::hex_to_digest(csum)?;
             let size = required_integer_property(item, "size")? as u64;
-            manifest.add_file(filename, size, csum)?;
+            let encrypted = item["encrypted"].as_bool();
+            manifest.add_file(filename, size, csum, encrypted)?;
         }

         if manifest.files().is_empty() {
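The manifest change above adds an optional per-file flag that is only meaningful when its value is known. A small, self-contained sketch of how such an optional flag typically ends up in the serialized JSON (file names and the helper below are illustrative only):

use serde_json::json;

fn file_entry(filename: &str, size: u64, encrypted: Option<bool>) -> serde_json::Value {
    let mut value = json!({
        "filename": filename,
        "size": size,
    });
    // Only set the key when the flag is actually known.
    if let Some(encrypted) = encrypted {
        value["encrypted"] = encrypted.into();
    }
    value
}

fn main() {
    println!("{}", file_entry("root.pxar.didx", 123, Some(true)));
    println!("{}", file_entry("catalog.pcat1.didx", 456, None)); // no "encrypted" key emitted
}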
@@ -1,38 +1,39 @@
-use anyhow::{Error};
+use std::future::Future;
+use std::pin::Pin;
 use std::sync::Arc;

-use super::datastore::*;
-use super::crypt_config::*;
-use super::data_blob::*;
+use anyhow::Error;
+
+use super::crypt_config::CryptConfig;
+use super::data_blob::DataBlob;
+use super::datastore::DataStore;

 /// The ReadChunk trait allows reading backup data chunks (local or remote)
 pub trait ReadChunk {
     /// Returns the encoded chunk data
-    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error>;
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error>;

     /// Returns the decoded chunk data
-    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error>;
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
 }

+#[derive(Clone)]
 pub struct LocalChunkReader {
     store: Arc<DataStore>,
     crypt_config: Option<Arc<CryptConfig>>,
 }

 impl LocalChunkReader {

     pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
-        Self { store, crypt_config }
+        Self {
+            store,
+            crypt_config,
+        }
     }
 }

 impl ReadChunk for LocalChunkReader {
-    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
-
-        let digest_str = proxmox::tools::digest_to_hex(digest);
-        println!("READ CHUNK {}", digest_str);
-
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let (path, _) = self.store.chunk_path(digest);
         let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
         let chunk = DataBlob::from_raw(raw_data)?;
@@ -41,13 +42,59 @@ impl ReadChunk for LocalChunkReader {
         Ok(chunk)
     }

-    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
-        let chunk = self.read_raw_chunk(digest)?;
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        let chunk = ReadChunk::read_raw_chunk(self, digest)?;

         let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

         // fixme: verify digest?

         Ok(raw_data)
     }
 }
+
+pub trait AsyncReadChunk: Send {
+    /// Returns the encoded chunk data
+    fn read_raw_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;
+
+    /// Returns the decoded chunk data
+    fn read_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
+}
+
+impl AsyncReadChunk for LocalChunkReader {
+    fn read_raw_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
+        Box::pin(async move{
+            let (path, _) = self.store.chunk_path(digest);
+
+            let raw_data = tokio::fs::read(&path).await?;
+            let chunk = DataBlob::from_raw(raw_data)?;
+            chunk.verify_crc()?;
+
+            Ok(chunk)
+        })
+    }
+
+    fn read_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
+        Box::pin(async move {
+            let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
+
+            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+            // fixme: verify digest?
+
+            Ok(raw_data)
+        })
+    }
+}
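AsyncReadChunk above works around the lack of async trait methods by having each method return a boxed, pinned future. A self-contained sketch of that pattern; the trait, type, and executor choice below are invented for illustration and are not part of the repository:

use std::future::Future;
use std::pin::Pin;

trait AsyncFetch: Send {
    fn fetch<'a>(
        &'a self,
        key: &'a str,
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, String>> + Send + 'a>>;
}

struct StaticStore;

impl AsyncFetch for StaticStore {
    fn fetch<'a>(
        &'a self,
        key: &'a str,
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, String>> + Send + 'a>> {
        // Box::pin turns the async block into a pinned, heap-allocated future,
        // which is what lets a plain (non-async) trait method return it.
        Box::pin(async move {
            match key {
                "hello" => Ok(b"world".to_vec()),
                _ => Err(format!("no such key: {}", key)),
            }
        })
    }
}

fn main() {
    let store = StaticStore;
    let data = futures::executor::block_on(store.fetch("hello")).unwrap();
    assert_eq!(data, b"world".to_vec());
    println!("fetched {} bytes", data.len());
}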
196
src/backup/verify.rs
Normal file
196
src/backup/verify.rs
Normal file
@ -0,0 +1,196 @@
use anyhow::{bail, Error};

use crate::server::WorkerTask;

use super::{
    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
    FileInfo, ArchiveType, archive_type,
};

fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;

    let csum = openssl::sha::sha256(blob.raw_data());
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    blob.verify_crc()?;

    let magic = blob.magic();

    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
        return Ok(());
    }

    blob.decode(None)?;

    Ok(())
}

fn verify_index_chunks(
    datastore: &DataStore,
    index: Box<dyn IndexFile>,
    worker: &WorkerTask,
) -> Result<(), Error> {

    for pos in 0..index.index_count() {

        worker.fail_on_abort()?;

        let info = index.chunk_info(pos).unwrap();
        let size = info.range.end - info.range.start;
        datastore.verify_stored_chunk(&info.digest, size)?;
    }

    Ok(())
}

fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_fixed_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_dynamic_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

/// Verify a single backup snapshot
///
/// This checks all archives inside a backup snapshot.
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));

    let mut error_count = 0;

    for info in manifest.files() {
        let result = proxmox::try_block!({
            worker.log(format!(" check {}", info.filename));
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
            }
        });

        worker.fail_on_abort()?;

        if let Err(err) = result {
            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a backup group
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {

    let mut list = match group.list_backups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify group {}:{}", datastore.name(), group));

    let mut error_count = 0;

    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a datastore
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {

    let list = match BackupGroup::list_groups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
            return Ok(false);
        }
    };

    worker.log(format!("verify datastore {}", datastore.name()));

    let mut error_count = 0;
    for group in list {
        if !verify_backup_group(datastore, &group, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}
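The new module is layered: `verify_all_backups` iterates groups, `verify_backup_group` iterates snapshots, and `verify_backup_dir` checks each archive via the manifest. A rough sketch of driving it from an existing worker task; the `datastore` and `worker` handles are assumed to exist already and the items of the module above are assumed to be in scope:

```rust
use anyhow::Error;

// Hypothetical driver: verify one datastore and log the overall outcome.
fn run_verification(datastore: &DataStore, worker: &WorkerTask) -> Result<(), Error> {
    let all_ok = verify_all_backups(datastore, worker)?;
    if all_ok {
        worker.log("verification finished without errors".to_string());
    } else {
        worker.log("verification finished with errors".to_string());
    }
    Ok(())
}
```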
@@ -14,6 +14,8 @@ use proxmox_backup::config;
 use proxmox_backup::buildcfg;

 fn main() {
+    proxmox_backup::tools::setup_safe_path_env();
+
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("Error: {}", err);
         std::process::exit(-1);
@@ -1,13 +1,20 @@
-use anyhow::{bail, format_err, Error};
-use nix::unistd::{fork, ForkResult, pipe};
-use std::os::unix::io::RawFd;
-use chrono::{Local, DateTime, Utc, TimeZone};
-use std::path::{Path, PathBuf};
 use std::collections::{HashSet, HashMap};
-use std::ffi::OsStr;
-use std::io::{Write, Seek, SeekFrom};
+use std::io::{self, Write, Seek, SeekFrom};
 use std::os::unix::fs::OpenOptionsExt;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::Context;
+
+use anyhow::{bail, format_err, Error};
+use chrono::{Local, DateTime, Utc, TimeZone};
+use futures::future::FutureExt;
+use futures::stream::{StreamExt, TryStreamExt};
+use serde_json::{json, Value};
+use tokio::sync::mpsc;
+use xdg::BaseDirectories;
+
+use pathpatterns::{MatchEntry, MatchType, PatternFlag};
 use proxmox::{sortable, identity};
 use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
 use proxmox::sys::linux::tty;
@@ -15,32 +22,52 @@ use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
 use proxmox::api::schema::*;
 use proxmox::api::cli::*;
 use proxmox::api::api;
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use proxmox_backup::tools;
 use proxmox_backup::api2::types::*;
 use proxmox_backup::client::*;
-use proxmox_backup::backup::*;
-use proxmox_backup::pxar::{ self, catalog::* };
+use proxmox_backup::pxar::catalog::*;
+use proxmox_backup::backup::{
+    archive_type,
+    encrypt_key_with_passphrase,
+    load_and_decrypt_key,
+    store_key_config,
+    verify_chunk_size,
+    ArchiveType,
+    AsyncReadChunk,
+    BackupDir,
+    BackupGroup,
+    BackupManifest,
+    BufferedDynamicReader,
+    CatalogReader,
+    CatalogWriter,
+    CATALOG_NAME,
+    ChunkStream,
+    CryptConfig,
+    DataBlob,
+    DynamicIndexReader,
+    FixedChunkStream,
+    FixedIndexReader,
+    IndexFile,
+    KeyConfig,
+    MANIFEST_BLOB_NAME,
+    Shell,
+};

-use serde_json::{json, Value};
-//use hyper::Body;
-use std::sync::{Arc, Mutex};
-//use regex::Regex;
-use xdg::BaseDirectories;
-
-use futures::*;
-use tokio::sync::mpsc;
+mod proxmox_backup_client;
+use proxmox_backup_client::*;

 const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
 const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";

-const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
+pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
     .format(&BACKUP_REPO_URL)
     .max_length(256)
     .schema();

-const KEYFILE_SCHEMA: Schema = StringSchema::new(
+pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();

@@ -55,7 +82,7 @@ fn get_default_repository() -> Option<String> {
     std::env::var("PBS_REPOSITORY").ok()
 }

-fn extract_repository_from_value(
+pub fn extract_repository_from_value(
     param: &Value,
 ) -> Result<BackupRepository, Error> {

@@ -128,7 +155,7 @@ fn record_repository(repo: &BackupRepository) {
     let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
 }

-fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

     let mut result = vec![];

@@ -212,7 +239,7 @@ async fn api_datastore_list_snapshots(
     Ok(result["data"].take())
 }

-async fn api_datastore_latest_snapshot(
+pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
@@ -232,18 +259,17 @@ async fn api_datastore_latest_snapshot(
     Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
 }

 async fn backup_directory<P: AsRef<Path>>(
     client: &BackupWriter,
+    previous_manifest: Option<Arc<BackupManifest>>,
     dir_path: P,
     archive_name: &str,
     chunk_size: Option<usize>,
     device_set: Option<HashSet<u64>>,
     verbose: bool,
     skip_lost_and_found: bool,
-    crypt_config: Option<Arc<CryptConfig>>,
     catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
-    exclude_pattern: Vec<pxar::MatchPattern>,
+    exclude_pattern: Vec<MatchEntry>,
     entries_max: usize,
 ) -> Result<BackupStats, Error> {

@@ -271,7 +297,7 @@ async fn backup_directory<P: AsRef<Path>>(
     });

     let stats = client
-        .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
+        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
         .await?;

     Ok(stats)
@@ -279,12 +305,12 @@ async fn backup_directory<P: AsRef<Path>>(

 async fn backup_image<P: AsRef<Path>>(
     client: &BackupWriter,
+    previous_manifest: Option<Arc<BackupManifest>>,
     image_path: P,
     archive_name: &str,
     image_size: u64,
     chunk_size: Option<usize>,
     _verbose: bool,
-    crypt_config: Option<Arc<CryptConfig>>,
 ) -> Result<BackupStats, Error> {

     let path = image_path.as_ref().to_owned();
@@ -297,7 +323,7 @@ async fn backup_image<P: AsRef<Path>>(
     let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

     let stats = client
-        .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
+        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
         .await?;

     Ok(stats)
@@ -399,8 +425,8 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let client = connect(repo.host(), repo.user())?;

-    let group = if let Some(path) = param["group"].as_str() {
-        Some(BackupGroup::parse(path)?)
+    let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
+        Some(path.parse()?)
     } else {
         None
     };
@@ -417,7 +443,11 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        Ok(tools::format::render_backup_file_list(&item.files))
+        let mut filenames = Vec::new();
+        for file in &item.files {
+            filenames.push(file.filename.to_string());
+        }
+        Ok(tools::format::render_backup_file_list(&filenames[..]))
     };

     let options = default_table_format_options()
@@ -456,7 +486,7 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;

     let mut client = connect(repo.host(), repo.user())?;

@@ -536,7 +566,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;

     let keyfile = param["keyfile"].as_str().map(PathBuf::from);

@@ -614,7 +644,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

     let path = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(path)?;
+    let snapshot: BackupDir = path.parse()?;

     let output_format = get_output_format(&param);

@@ -676,8 +706,7 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
 }

 fn spawn_catalog_upload(
-    client: Arc<BackupWriter>,
-    crypt_config: Option<Arc<CryptConfig>>,
+    client: Arc<BackupWriter>
 ) -> Result<
     (
         Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
@@ -695,7 +724,7 @@ fn spawn_catalog_upload(

     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
+            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
            .await;

         if let Err(ref err) = catalog_upload_result {
@@ -769,7 +798,7 @@ fn spawn_catalog_upload(
                 type: Integer,
                 description: "Max number of entries to hold in memory.",
                 optional: true,
-                default: pxar::ENCODER_MAX_ENTRIES as isize,
+                default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
             },
             "verbose": {
                 type: Boolean,
@@ -812,17 +841,19 @@ async fn create_backup(

     let include_dev = param["include-dev"].as_array();

-    let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+    let entries_max = param["entries-max"].as_u64()
+        .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);

     let empty = Vec::new();
-    let arg_pattern = param["exclude"].as_array().unwrap_or(&empty);
+    let exclude_args = param["exclude"].as_array().unwrap_or(&empty);

-    let mut pattern_list = Vec::with_capacity(arg_pattern.len());
-    for s in arg_pattern {
-        let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
-        let p = pxar::MatchPattern::from_line(l.as_bytes())?
-            .ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
-        pattern_list.push(p);
+    let mut pattern_list = Vec::with_capacity(exclude_args.len());
+    for entry in exclude_args {
+        let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
+        pattern_list.push(
+            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
+                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
+        );
     }

     let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
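The exclude handling now goes through the `pathpatterns` crate: each `--exclude` argument becomes a `MatchEntry` with `MatchType::Exclude`. A small standalone sketch of just that parsing step, using the same calls as the hunk above (the sample patterns in the comment are made up):

```rust
use anyhow::{format_err, Error};
use pathpatterns::{MatchEntry, MatchType, PatternFlag};

// Parse a list of exclude patterns the same way the backup client does above.
fn parse_excludes(args: &[&str]) -> Result<Vec<MatchEntry>, Error> {
    let mut pattern_list = Vec::with_capacity(args.len());
    for &entry in args {
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?,
        );
    }
    Ok(pattern_list)
}

// Example (hypothetical patterns): parse_excludes(&["*.tmp", "/var/cache/**"])?;
```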
@@ -844,8 +875,6 @@ async fn create_backup(

     let mut upload_list = vec![];

-    let mut upload_catalog = false;
-
     for backupspec in backupspec_list {
         let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
         let filename = &spec.config_string;
@@ -863,7 +892,6 @@ async fn create_backup(
                     bail!("got unexpected file type (expected directory)");
                 }
                 upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
-                upload_catalog = true;
             }
             BackupSpecificationType::IMAGE => {
                 if !(file_type.is_file() || file_type.is_block_device()) {
@@ -923,8 +951,11 @@ async fn create_backup(
         }
     };

+    let is_encrypted = Some(crypt_config.is_some());
+
     let client = BackupWriter::start(
         client,
+        crypt_config.clone(),
         repo.store(),
         backup_type,
         &backup_id,
@@ -932,64 +963,79 @@ async fn create_backup(
         verbose,
     ).await?;

+    let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
+        Some(Arc::new(previous_manifest))
+    } else {
+        None
+    };
+
     let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
     let mut manifest = BackupManifest::new(snapshot);

-    let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
+    let mut catalog = None;
+    let mut catalog_result_tx = None;

     for (backup_type, filename, target, size) in upload_list {
         match backup_type {
             BackupSpecificationType::CONFIG => {
                 println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+                    .upload_blob_from_file(&filename, &target, true, Some(true))
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
             BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
                 println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+                    .upload_blob_from_file(&filename, &target, true, Some(true))
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
             BackupSpecificationType::PXAR => {
+                // start catalog upload on first use
+                if catalog.is_none() {
+                    let (cat, res) = spawn_catalog_upload(client.clone())?;
+                    catalog = Some(cat);
+                    catalog_result_tx = Some(res);
+                }
+                let catalog = catalog.as_ref().unwrap();
+
                 println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
                 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                 let stats = backup_directory(
                     &client,
+                    previous_manifest.clone(),
                     &filename,
                     &target,
                     chunk_size_opt,
                     devices.clone(),
                     verbose,
                     skip_lost_and_found,
-                    crypt_config.clone(),
                     catalog.clone(),
                     pattern_list.clone(),
                     entries_max as usize,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
                 catalog.lock().unwrap().end_directory()?;
             }
             BackupSpecificationType::IMAGE => {
                 println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
                 let stats = backup_image(
                     &client,
-                    &filename,
+                    previous_manifest.clone(),
+                    &filename,
                     &target,
                     size,
                     chunk_size_opt,
                     verbose,
-                    crypt_config.clone(),
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum)?;
+                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
             }
         }
     }

     // finalize and upload catalog
-    if upload_catalog {
+    if let Some(catalog) = catalog {
         let mutex = Arc::try_unwrap(catalog)
             .map_err(|_| format_err!("unable to get catalog (still used)"))?;
         let mut catalog = mutex.into_inner().unwrap();
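The previous snapshot's manifest is downloaded once, right after the writer session is started, and then cloned into every `backup_directory`/`backup_image` call so unchanged chunks can be reused. A trimmed, hypothetical helper showing just that step (it assumes an already started `BackupWriter`):

```rust
use std::sync::Arc;
use proxmox_backup::backup::BackupManifest;
use proxmox_backup::client::BackupWriter;

// Hypothetical excerpt of the flow above: fetch the previous manifest if one exists,
// otherwise fall back to None (first backup in this group, nothing to reuse).
async fn load_previous_manifest(client: &BackupWriter) -> Option<Arc<BackupManifest>> {
    match client.download_previous_manifest().await {
        Ok(manifest) => Some(Arc::new(manifest)),
        Err(_) => None,
    }
}
```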
@@ -998,18 +1044,19 @@ async fn create_backup(

         drop(catalog); // close upload stream

-        let stats = catalog_result_rx.await??;
-        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
+        if let Some(catalog_result_rx) = catalog_result_tx {
+            let stats = catalog_result_rx.await??;
+            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
+        }
     }

     if let Some(rsa_encrypted_key) = rsa_encrypted_key {
         let target = "rsa-encrypted.key";
         println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
         let stats = client
-            .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
+            .upload_blob_from_data(rsa_encrypted_key, target, false, None)
             .await?;
-        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
+        manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;

         // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
         /*
@@ -1027,7 +1074,7 @@ async fn create_backup(
     println!("Upload index.json to '{:?}'", repo);
     let manifest = serde_json::to_string_pretty(&manifest)?.into();
     client
-        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
+        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
         .await?;

     client.finish().await?;
@@ -1062,7 +1109,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<Str
     result
 }

-fn dump_image<W: Write>(
+async fn dump_image<W: Write>(
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     index: FixedIndexReader,
@@ -1072,7 +1119,7 @@ fn dump_image<W: Write>(

     let most_used = index.find_most_used_chunks(8);

-    let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);

     // Note: we avoid using BufferedFixedReader, because that add an additional buffer/copy
     // and thus slows down reading. Instead, directly use RemoteChunkReader
@@ -1082,7 +1129,7 @@ fn dump_image<W: Write>(

     for pos in 0..index.index_count() {
         let digest = index.index_digest(pos).unwrap();
-        let raw_data = chunk_reader.read_chunk(&digest)?;
+        let raw_data = chunk_reader.read_chunk(&digest).await?;
         writer.write_all(&raw_data)?;
         bytes += raw_data.len();
         if verbose {
@@ -1171,10 +1218,10 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let path = tools::required_string_param(&param, "snapshot")?;

     let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
+        let group: BackupGroup = path.parse()?;
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
-        let snapshot = BackupDir::parse(path)?;
+        let snapshot: BackupDir = path.parse()?;
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };

@@ -1246,18 +1293,19 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let mut reader = BufferedDynamicReader::new(index, chunk_reader);

     if let Some(target) = target {
-        let feature_flags = pxar::flags::DEFAULT;
-        let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
-        decoder.set_callback(move |path| {
-            if verbose {
-                eprintln!("{:?}", path);
-            }
-            Ok(())
-        });
-        decoder.set_allow_existing_dirs(allow_existing_dirs);
-
-        decoder.restore(Path::new(target), &Vec::new())?;
+        proxmox_backup::pxar::extract_archive(
+            pxar::decoder::Decoder::from_std(reader)?,
+            Path::new(target),
+            &[],
+            proxmox_backup::pxar::Flags::DEFAULT,
+            allow_existing_dirs,
+            |path| {
+                if verbose {
+                    println!("{:?}", path);
+                }
+            },
+        )
+        .map_err(|err| format_err!("error extracting archive - {}", err))?;
     } else {
         let mut writer = std::fs::OpenOptions::new()
             .write(true)
@@ -1285,7 +1333,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
             .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
     };

-    dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
+    dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
     }

     Ok(Value::Null)
@@ -1320,7 +1368,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

     let snapshot = tools::required_string_param(&param, "snapshot")?;
-    let snapshot = BackupDir::parse(snapshot)?;
+    let snapshot: BackupDir = snapshot.parse()?;

     let mut client = connect(repo.host(), repo.user())?;

@@ -1394,7 +1442,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

     let group = tools::required_string_param(&param, "group")?;
-    let group = BackupGroup::parse(group)?;
+    let group: BackupGroup = group.parse()?;

     let output_format = get_output_format(&param);

@@ -1565,7 +1613,7 @@ async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String
     result
 }

-fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
 }

@@ -1628,9 +1676,9 @@ async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<St
         _ => return result,
     };

-    let snapshot = match param.get("snapshot") {
+    let snapshot: BackupDir = match param.get("snapshot") {
         Some(path) => {
-            match BackupDir::parse(path) {
+            match path.parse() {
                 Ok(v) => v,
                 _ => return result,
             }
@@ -1666,7 +1714,7 @@ fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<Stri
     .collect()
 }

-fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
     complete_server_file_name(arg, param)
         .iter()
         .filter_map(|v| {
@@ -1935,125 +1983,50 @@ fn key_mgmt_cli() -> CliCommandMap {
         .insert("change-passphrase", key_change_passphrase_cmd_def)
 }

-fn mount(
-    param: Value,
-    _info: &ApiMethod,
-    _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-    let verbose = param["verbose"].as_bool().unwrap_or(false);
-    if verbose {
-        // This will stay in foreground with debug output enabled as None is
-        // passed for the RawFd.
-        return proxmox_backup::tools::runtime::main(mount_do(param, None));
-    }
-
-    // Process should be deamonized.
-    // Make sure to fork before the async runtime is instantiated to avoid troubles.
-    let pipe = pipe()?;
-    match fork() {
-        Ok(ForkResult::Parent { .. }) => {
-            nix::unistd::close(pipe.1).unwrap();
-            // Blocks the parent process until we are ready to go in the child
-            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
-            Ok(Value::Null)
-        }
-        Ok(ForkResult::Child) => {
-            nix::unistd::close(pipe.0).unwrap();
-            nix::unistd::setsid().unwrap();
-            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
-        }
-        Err(_) => bail!("failed to daemonize process"),
-    }
-}
-
-async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
-    let repo = extract_repository_from_value(&param)?;
-    let archive_name = tools::required_string_param(&param, "archive-name")?;
-    let target = tools::required_string_param(&param, "target")?;
-    let client = connect(repo.host(), repo.user())?;
-
-    record_repository(&repo);
-
-    let path = tools::required_string_param(&param, "snapshot")?;
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot = BackupDir::parse(path)?;
-        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
-    };
-
-    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
-    let crypt_config = match keyfile {
-        None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-            Some(Arc::new(CryptConfig::new(key)?))
-        }
-    };
-
-    let server_archive_name = if archive_name.ends_with(".pxar") {
-        format!("{}.didx", archive_name)
-    } else {
-        bail!("Can only mount pxar archives.");
-    };
-
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        &backup_type,
-        &backup_id,
-        backup_time,
-        true,
-    ).await?;
-
-    let manifest = client.download_manifest().await?;
-
-    if server_archive_name.ends_with(".didx") {
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
-        let most_used = index.find_most_used_chunks(8);
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
-        let reader = BufferedDynamicReader::new(index, chunk_reader);
-        let decoder = pxar::Decoder::new(reader)?;
-        let options = OsStr::new("ro,default_permissions");
-        let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
-            .map_err(|err| format_err!("pxar mount failed: {}", err))?;
-
-        // Mount the session but not call fuse deamonize as this will cause
-        // issues with the runtime after the fork
-        let deamonize = false;
-        session.mount(&Path::new(target), deamonize)?;
-
-        if let Some(pipe) = pipe {
-            nix::unistd::chdir(Path::new("/")).unwrap();
-            // Finish creation of daemon by redirecting filedescriptors.
-            let nullfd = nix::fcntl::open(
-                "/dev/null",
-                nix::fcntl::OFlag::O_RDWR,
-                nix::sys::stat::Mode::empty(),
-            ).unwrap();
-            nix::unistd::dup2(nullfd, 0).unwrap();
-            nix::unistd::dup2(nullfd, 1).unwrap();
-            nix::unistd::dup2(nullfd, 2).unwrap();
-            if nullfd > 2 {
-                nix::unistd::close(nullfd).unwrap();
-            }
-            // Signal the parent process that we are done with the setup and it can
-            // terminate.
-            nix::unistd::write(pipe, &[0u8])?;
-            nix::unistd::close(pipe).unwrap();
-        }
-
-        let multithreaded = true;
-        session.run_loop(multithreaded)?;
-    } else {
-        bail!("unknown archive file extension (expected .pxar)");
-    }
-
-    Ok(Value::Null)
-}
+use proxmox_backup::client::RemoteChunkReader;
+
+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+pub struct BufferedDynamicReadAt {
+    inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
+}
+
+impl BufferedDynamicReadAt {
+    fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
+        Self {
+            inner: Mutex::new(inner),
+        }
+    }
+}
+
+impl ReadAt for BufferedDynamicReadAt {
+    fn start_read_at<'a>(
+        self: Pin<&'a Self>,
+        _cx: &mut Context,
+        buf: &'a mut [u8],
+        offset: u64,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        use std::io::Read;
+        MaybeReady::Ready(tokio::task::block_in_place(move || {
+            let mut reader = self.inner.lock().unwrap();
+            reader.seek(SeekFrom::Start(offset))?;
+            Ok(reader.read(buf)?)
+        }))
+    }
+
+    fn poll_complete<'a>(
+        self: Pin<&'a Self>,
+        _op: ReadAtOperation<'a>,
+    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+        panic!("LocalDynamicReadAt::start_read_at returned Pending");
+    }
+}

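The adapter bridges the blocking `BufferedDynamicReader` into the `ReadAt` interface expected by the pxar accessor code; `start_read_at` always answers `Ready` (via `tokio::task::block_in_place`), which is why `poll_complete` can never legitimately be reached. A hedged sketch of how it gets wired up, mirroring the `catalog_shell` code further below; the helper itself and its parameters are illustrative, not part of the patch:

```rust
use std::sync::Arc;
use anyhow::Error;
use proxmox_backup::backup::{BufferedDynamicReader, DynamicIndexReader};
use proxmox_backup::client::RemoteChunkReader;

// Hypothetical helper: turn a dynamic index plus chunk reader into a pxar accessor.
async fn open_pxar_accessor(
    index: DynamicIndexReader,
    chunk_reader: RemoteChunkReader,
) -> Result<(), Error> {
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let _accessor = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
    Ok(())
}
```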
 #[api(
     input: {
         properties: {
@@ -2085,10 +2058,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let archive_name = tools::required_string_param(&param, "archive-name")?;

     let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group = BackupGroup::parse(path)?;
+        let group: BackupGroup = path.parse()?;
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
-        let snapshot = BackupDir::parse(path)?;
+        let snapshot: BackupDir = path.parse()?;
         (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
     };

@@ -2117,7 +2090,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         true,
     ).await?;

-    let tmpfile = std::fs::OpenOptions::new()
+    let mut tmpfile = std::fs::OpenOptions::new()
         .write(true)
         .read(true)
         .custom_flags(libc::O_TMPFILE)
@@ -2129,13 +2102,12 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let most_used = index.find_most_used_chunks(8);
     let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
-    let mut decoder = pxar::Decoder::new(reader)?;
-    decoder.set_callback(|path| {
-        println!("{:?}", path);
-        Ok(())
-    });
+    let archive_size = reader.archive_size();
+    let reader: proxmox_backup::pxar::fuse::Reader =
+        Arc::new(BufferedDynamicReadAt::new(reader));
+    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

-    let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
+    client.download(CATALOG_NAME, &mut tmpfile).await?;
     let index = DynamicIndexReader::new(tmpfile)
         .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

@@ -2161,10 +2133,10 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         catalog_reader,
         &server_archive_name,
         decoder,
-    )?;
+    ).await?;

     println!("Starting interactive shell");
-    state.shell()?;
+    state.shell().await?;

     record_repository(&repo);

@@ -2188,138 +2160,6 @@ fn catalog_mgmt_cli() -> CliCommandMap {
         .insert("shell", catalog_shell_cmd_def)
 }

-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            limit: {
-                description: "The maximal number of tasks to list.",
-                type: Integer,
-                optional: true,
-                minimum: 1,
-                maximum: 1000,
-                default: 50,
-            },
-            "output-format": {
-                schema: OUTPUT_FORMAT,
-                optional: true,
-            },
-            all: {
-                type: Boolean,
-                description: "Also list stopped tasks.",
-                optional: true,
-            },
-        }
-    }
-)]
-/// List running server tasks for this repo user
-async fn task_list(param: Value) -> Result<Value, Error> {
-
-    let output_format = get_output_format(&param);
-
-    let repo = extract_repository_from_value(&param)?;
-    let client = connect(repo.host(), repo.user())?;
-
-    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
-    let running = !param["all"].as_bool().unwrap_or(false);
-
-    let args = json!({
-        "running": running,
-        "start": 0,
-        "limit": limit,
-        "userfilter": repo.user(),
-        "store": repo.store(),
-    });
-
-    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
-    let mut data = result["data"].take();
-
-    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
-
-    let options = default_table_format_options()
-        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
-        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
-        .column(ColumnConfig::new("upid"))
-        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
-
-    format_and_print_result_full(&mut data, schema, &output_format, &options);
-
-    Ok(Value::Null)
-}
-
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            upid: {
-                schema: UPID_SCHEMA,
-            },
-        }
-    }
-)]
-/// Display the task log.
-async fn task_log(param: Value) -> Result<Value, Error> {
-
-    let repo = extract_repository_from_value(&param)?;
-    let upid = tools::required_string_param(&param, "upid")?;
-
-    let client = connect(repo.host(), repo.user())?;
-
-    display_task_log(client, upid, true).await?;
-
-    Ok(Value::Null)
-}
-
-#[api(
-    input: {
-        properties: {
-            repository: {
-                schema: REPO_URL_SCHEMA,
-                optional: true,
-            },
-            upid: {
-                schema: UPID_SCHEMA,
-            },
-        }
-    }
-)]
-/// Try to stop a specific task.
-async fn task_stop(param: Value) -> Result<Value, Error> {
-
-    let repo = extract_repository_from_value(&param)?;
-    let upid_str = tools::required_string_param(&param, "upid")?;
-
-    let mut client = connect(repo.host(), repo.user())?;
-
-    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
-    let _ = client.delete(&path, None).await?;
-
-    Ok(Value::Null)
-}
-
-fn task_mgmt_cli() -> CliCommandMap {
-
-    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
-        .completion_cb("repository", complete_repository);
-
-    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
-        .arg_param(&["upid"]);
-
-    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
-        .arg_param(&["upid"]);
-
-    CliCommandMap::new()
-        .insert("log", task_log_cmd_def)
-        .insert("list", task_list_cmd_def)
-        .insert("stop", task_stop_cmd_def)
-}
-
 fn main() {

     let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
@@ -2329,6 +2169,10 @@ fn main() {
         .completion_cb("keyfile", tools::complete_file_name)
         .completion_cb("chunk-size", complete_chunk_size);

+    let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
+        .completion_cb("repository", complete_repository)
+        .completion_cb("keyfile", tools::complete_file_name);
+
     let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
         .arg_param(&["snapshot", "logfile"])
         .completion_cb("snapshot", complete_backup_snapshot)
@@ -2378,30 +2222,6 @@ fn main() {
     let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
         .completion_cb("repository", complete_repository);

-    #[sortable]
-    const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
-        &ApiHandler::Sync(&mount),
-        &ObjectSchema::new(
-            "Mount pxar archive.",
-            &sorted!([
-                ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
-                ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
-                ("target", false, &StringSchema::new("Target directory path.").schema()),
-                ("repository", true, &REPO_URL_SCHEMA),
-                ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
-                ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
-            ]),
-        )
-    );
-
-    let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
-        .arg_param(&["snapshot", "archive-name", "target"])
-        .completion_cb("repository", complete_repository)
-        .completion_cb("snapshot", complete_group_or_snapshot)
-        .completion_cb("archive-name", complete_pxar_archive_name)
-        .completion_cb("target", tools::complete_file_name);
-
     let cmd_def = CliCommandMap::new()
         .insert("backup", backup_cmd_def)
         .insert("upload-log", upload_log_cmd_def)
@@ -2416,9 +2236,10 @@ fn main() {
         .insert("files", files_cmd_def)
         .insert("status", status_cmd_def)
         .insert("key", key_mgmt_cli())
-        .insert("mount", mount_cmd_def)
+        .insert("mount", mount_cmd_def())
         .insert("catalog", catalog_mgmt_cli())
-        .insert("task", task_mgmt_cli());
+        .insert("task", task_mgmt_cli())
+        .insert("benchmark", benchmark_cmd_def);

     let rpcenv = CliEnvironment::new();
     run_cli_command(cmd_def, rpcenv, Some(|future| {
@@ -32,6 +32,24 @@ async fn view_task_result(
     Ok(())
 }

+// Note: local workers should print logs to stdout, so there is no need
+// to fetch/display logs. We just wait for the worker to finish.
+pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
+
+    let upid: proxmox_backup::server::UPID = upid_str.parse()?;
+
+    let sleep_duration = core::time::Duration::new(0, 100_000_000);
+
+    loop {
+        if proxmox_backup::server::worker_is_active_local(&upid) {
+            tokio::time::delay_for(sleep_duration).await;
+        } else {
+            break;
+        }
+    }
+    Ok(())
+}
+
 fn connect() -> Result<HttpClient, Error> {

     let uid = nix::unistd::Uid::current();
@@ -301,11 +319,48 @@ async fn pull_datastore(
     Ok(Value::Null)
 }

+#[api(
+    input: {
+        properties: {
+            "store": {
+                schema: DATASTORE_SCHEMA,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Verify backups
+async fn verify(
+    store: String,
+    param: Value,
+) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
+
+    let mut client = connect()?;
+
+    let args = json!({});
+
+    let path = format!("api2/json/admin/datastore/{}/verify", store);
+
+    let result = client.post(&path, Some(args)).await?;
+
+    view_task_result(client, result, &output_format).await?;
+
+    Ok(Value::Null)
+}
+
|
|
||||||
fn main() {
|
fn main() {
|
||||||
|
|
||||||
|
proxmox_backup::tools::setup_safe_path_env();
|
||||||
|
|
||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
.insert("acl", acl_commands())
|
.insert("acl", acl_commands())
|
||||||
.insert("datastore", datastore_commands())
|
.insert("datastore", datastore_commands())
|
||||||
|
.insert("disk", disk_commands())
|
||||||
.insert("dns", dns_commands())
|
.insert("dns", dns_commands())
|
||||||
.insert("network", network_commands())
|
.insert("network", network_commands())
|
||||||
.insert("user", user_commands())
|
.insert("user", user_commands())
|
||||||
@ -321,8 +376,16 @@ fn main() {
|
|||||||
.completion_cb("local-store", config::datastore::complete_datastore_name)
|
.completion_cb("local-store", config::datastore::complete_datastore_name)
|
||||||
.completion_cb("remote", config::remote::complete_remote_name)
|
.completion_cb("remote", config::remote::complete_remote_name)
|
||||||
.completion_cb("remote-store", complete_remote_datastore_name)
|
.completion_cb("remote-store", complete_remote_datastore_name)
|
||||||
|
)
|
||||||
|
.insert(
|
||||||
|
"verify",
|
||||||
|
CliCommand::new(&API_METHOD_VERIFY)
|
||||||
|
.arg_param(&["store"])
|
||||||
|
.completion_cb("store", config::datastore::complete_datastore_name)
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
let mut rpcenv = CliEnvironment::new();
|
let mut rpcenv = CliEnvironment::new();
|
||||||
rpcenv.set_user(Some(String::from("root@pam")));
|
rpcenv.set_user(Some(String::from("root@pam")));
|
||||||
|
|
||||||
|
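A note on the new verify subcommand: it only wraps the existing per-datastore verify endpoint and then follows the spawned task. A minimal sketch of doing the same thing directly with the helpers from this file ("store1" and the "text" output format are placeholders, not values taken from the diff):

    // Sketch only: "store1" is a placeholder datastore name.
    async fn trigger_verify_example() -> Result<(), Error> {
        let mut client = connect()?;
        let path = "api2/json/admin/datastore/store1/verify";
        let result = client.post(path, Some(json!({}))).await?;
        // view_task_result() keeps following the verification worker until it finishes
        view_task_result(client, result, "text").await?;
        Ok(())
    }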
@@ -12,12 +12,14 @@ use proxmox::api::RpcEnvironmentType;
 use proxmox_backup::configdir;
 use proxmox_backup::buildcfg;
 use proxmox_backup::server;
-use proxmox_backup::tools::daemon;
+use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
 use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
 
 fn main() {
 
+    proxmox_backup::tools::setup_safe_path_env();
+
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("Error: {}", err);
         std::process::exit(-1);
@@ -134,10 +136,10 @@ fn start_task_scheduler() {
     tokio::spawn(task.map(|_| ()));
 }
 
-use std::time:: {Instant, Duration, SystemTime, UNIX_EPOCH};
+use std::time:: {Instant, Duration};
 
 fn next_minute() -> Result<Instant, Error> {
-    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
+    let epoch_now = epoch_now()?;
     let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
     Ok(Instant::now() + epoch_next - epoch_now)
 }
@@ -296,8 +298,9 @@ async fn schedule_datastore_garbage_collection() {
             continue;
         }
     };
-    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
-        Ok(epoch_now) => epoch_now.as_secs() as i64,
+
+    let now = match epoch_now_u64() {
+        Ok(epoch_now) => epoch_now as i64,
         Err(err) => {
             eprintln!("query system time failed - {}", err);
             continue;
@@ -407,8 +410,8 @@ async fn schedule_datastore_prune() {
         }
     };
 
-    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
-        Ok(epoch_now) => epoch_now.as_secs() as i64,
+    let now = match epoch_now_u64() {
+        Ok(epoch_now) => epoch_now as i64,
         Err(err) => {
             eprintln!("query system time failed - {}", err);
             continue;
@@ -532,8 +535,8 @@ async fn schedule_datastore_sync_jobs() {
         }
     };
 
-    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
-        Ok(epoch_now) => epoch_now.as_secs() as i64,
+    let now = match epoch_now_u64() {
+        Ok(epoch_now) => epoch_now as i64,
         Err(err) => {
             eprintln!("query system time failed - {}", err);
             continue;
@@ -711,11 +714,11 @@ async fn generate_host_stats(save: bool) {
 fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {
 
     match proxmox_backup::tools::disks::disk_usage(path) {
-        Ok((total, used, _avail)) => {
+        Ok(status) => {
             let rrd_key = format!("{}/total", rrd_prefix);
-            rrd_update_gauge(&rrd_key, total as f64, save);
+            rrd_update_gauge(&rrd_key, status.total as f64, save);
             let rrd_key = format!("{}/used", rrd_prefix);
-            rrd_update_gauge(&rrd_key, used as f64, save);
+            rrd_update_gauge(&rrd_key, status.used as f64, save);
         }
         Err(err) => {
             eprintln!("read disk_usage on {:?} failed - {}", path, err);
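The hunks above only show call sites of the new time helpers; their definitions are not part of this compare view. A minimal sketch of what epoch_now / epoch_now_u64 are assumed to wrap, based on the SystemTime code they replace (the real definitions live in proxmox_backup::tools):

    use std::time::{Duration, SystemTime, UNIX_EPOCH};
    use anyhow::Error;

    // Assumed shape: the same computation as the removed inline SystemTime calls.
    pub fn epoch_now() -> Result<Duration, Error> {
        Ok(SystemTime::now().duration_since(UNIX_EPOCH)?)
    }

    pub fn epoch_now_u64() -> Result<u64, Error> {
        Ok(epoch_now()?.as_secs())
    }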
src/bin/proxmox_backup_client/benchmark.rs (new file, 82 lines)
@@ -0,0 +1,82 @@
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::api;

use proxmox_backup::backup::{
    load_and_decrypt_key,
    CryptConfig,

};

use proxmox_backup::client::*;

use crate::{
    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
    extract_repository_from_value,
    get_encryption_key_password,
    record_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Run benchmark tests
pub async fn benchmark(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        "host",
        "benshmark",
        backup_time,
        false,
    ).await?;

    println!("Start upload speed test");
    let speed = client.upload_speedtest().await?;

    println!("Upload speed: {} MiB/s", speed);

    Ok(())
}

src/bin/proxmox_backup_client/mod.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
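One easy-to-miss line in benchmark.rs is the backup_time construction: Utc.timestamp(Utc::now().timestamp(), 0) rebuilds the time from whole seconds, so the snapshot timestamp carries no sub-second part. A standalone illustration of that chrono pattern:

    use chrono::{TimeZone, Utc};

    fn main() {
        let now = Utc::now();                                 // e.g. 2020-05-05T12:00:00.123456789Z
        let backup_time = Utc.timestamp(now.timestamp(), 0);  // 2020-05-05T12:00:00Z
        assert_eq!(backup_time.timestamp_subsec_nanos(), 0);
        println!("{} -> {}", now, backup_time);
    }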
src/bin/proxmox_backup_client/mount.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::os::unix::io::RawFd;
use std::path::Path;
use std::ffi::OsStr;

use anyhow::{bail, format_err, Error};
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};
use nix::unistd::{fork, ForkResult, pipe};
use futures::select;
use futures::future::FutureExt;

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};


use proxmox_backup::tools;
use proxmox_backup::backup::{
    load_and_decrypt_key,
    CryptConfig,
    IndexFile,
    BackupDir,
    BackupGroup,
    BufferedDynamicReader,
};

use proxmox_backup::client::*;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    get_encryption_key_password,
    complete_pxar_archive_name,
    complete_group_or_snapshot,
    complete_repository,
    record_repository,
    connect,
    api_datastore_latest_snapshot,
    BufferedDynamicReadAt,
};

#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&mount),
    &ObjectSchema::new(
        "Mount pxar archive.",
        &sorted!([
            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
            ("target", false, &StringSchema::new("Target directory path.").schema()),
            ("repository", true, &REPO_URL_SCHEMA),
            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
        ]),
    )
);

pub fn mount_cmd_def() -> CliCommand {

    CliCommand::new(&API_METHOD_MOUNT)
        .arg_param(&["snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("target", tools::complete_file_name)
}

fn mount(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let verbose = param["verbose"].as_bool().unwrap_or(false);
    if verbose {
        // This will stay in foreground with debug output enabled as None is
        // passed for the RawFd.
        return proxmox_backup::tools::runtime::main(mount_do(param, None));
    }

    // Process should be deamonized.
    // Make sure to fork before the async runtime is instantiated to avoid troubles.
    let pipe = pipe()?;
    match fork() {
        Ok(ForkResult::Parent { .. }) => {
            nix::unistd::close(pipe.1).unwrap();
            // Blocks the parent process until we are ready to go in the child
            let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
            Ok(Value::Null)
        }
        Ok(ForkResult::Child) => {
            nix::unistd::close(pipe.0).unwrap();
            nix::unistd::setsid().unwrap();
            proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
        }
        Err(_) => bail!("failed to daemonize process"),
    }
}

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
    let client = connect(repo.host(), repo.user())?;

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;
    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);
    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
            Some(Arc::new(CryptConfig::new(key)?))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let manifest = client.download_manifest().await?;

    if server_archive_name.ends_with(".didx") {
        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
        let most_used = index.find_most_used_chunks(8);
        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader: proxmox_backup::pxar::fuse::Reader =
            Arc::new(BufferedDynamicReadAt::new(reader));
        let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
        let options = OsStr::new("ro,default_permissions");

        let session = proxmox_backup::pxar::fuse::Session::mount(
            decoder,
            &options,
            false,
            Path::new(target),
        )
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

        if let Some(pipe) = pipe {
            nix::unistd::chdir(Path::new("/")).unwrap();
            // Finish creation of daemon by redirecting filedescriptors.
            let nullfd = nix::fcntl::open(
                "/dev/null",
                nix::fcntl::OFlag::O_RDWR,
                nix::sys::stat::Mode::empty(),
            ).unwrap();
            nix::unistd::dup2(nullfd, 0).unwrap();
            nix::unistd::dup2(nullfd, 1).unwrap();
            nix::unistd::dup2(nullfd, 2).unwrap();
            if nullfd > 2 {
                nix::unistd::close(nullfd).unwrap();
            }
            // Signal the parent process that we are done with the setup and it can
            // terminate.
            nix::unistd::write(pipe, &[0u8])?;
            nix::unistd::close(pipe).unwrap();
        }

        let mut interrupt = signal(SignalKind::interrupt())?;
        select! {
            res = session.fuse() => res?,
            _ = interrupt.recv().fuse() => {
                // exit on interrupted
            }
        }
    } else {
        bail!("unknown archive file extension (expected .pxar)");
    }

    Ok(Value::Null)
}
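The mount() function above is also a compact recipe for daemonizing a FUSE mount: fork before any tokio runtime exists, let the child do the slow setup, and unblock the parent through a pipe once the mount is ready. A stripped-down sketch of just that handshake, using the same nix calls as the code above (the child_setup closure is illustrative; a real daemon would keep running afterwards):

    use std::os::unix::io::RawFd;

    use anyhow::{bail, Error};
    use nix::unistd::{fork, pipe, ForkResult};

    fn daemonize_then(child_setup: impl FnOnce() -> Result<(), Error>) -> Result<(), Error> {
        // Create the pipe and fork *before* instantiating any async runtime.
        let (read_end, write_end): (RawFd, RawFd) = pipe()?;
        match fork() {
            Ok(ForkResult::Parent { .. }) => {
                nix::unistd::close(write_end)?;
                // Block until the child signals that its setup is complete.
                let _ = nix::unistd::read(read_end, &mut [0u8])?;
                Ok(())
            }
            Ok(ForkResult::Child) => {
                nix::unistd::close(read_end)?;
                nix::unistd::setsid()?;
                child_setup()?;
                // Tell the waiting parent it may exit now.
                nix::unistd::write(write_end, &[0u8])?;
                nix::unistd::close(write_end)?;
                Ok(())
            }
            Err(_) => bail!("failed to daemonize process"),
        }
    }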
src/bin/proxmox_backup_client/task.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, cli::*};

use proxmox_backup::tools;

use proxmox_backup::client::*;
use proxmox_backup::api2::types::UPID_SCHEMA;

use crate::{
    REPO_URL_SCHEMA,
    extract_repository_from_value,
    complete_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            limit: {
                description: "The maximal number of tasks to list.",
                type: Integer,
                optional: true,
                minimum: 1,
                maximum: 1000,
                default: 50,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
            all: {
                type: Boolean,
                description: "Also list stopped tasks.",
                optional: true,
            },
        }
    }
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;

    let limit = param["limit"].as_u64().unwrap_or(50) as usize;
    let running = !param["all"].as_bool().unwrap_or(false);

    let args = json!({
        "running": running,
        "start": 0,
        "limit": limit,
        "userfilter": repo.user(),
        "store": repo.store(),
    });

    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
    let mut data = result["data"].take();

    let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

    let options = default_table_format_options()
        .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
        .column(ColumnConfig::new("upid"))
        .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

    format_and_print_result_full(&mut data, schema, &output_format, &options);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid = tools::required_string_param(&param, "upid")?;

    let client = connect(repo.host(), repo.user())?;

    display_task_log(client, upid, true).await?;

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            upid: {
                schema: UPID_SCHEMA,
            },
        }
    }
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;
    let upid_str = tools::required_string_param(&param, "upid")?;

    let mut client = connect(repo.host(), repo.user())?;

    let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
    let _ = client.delete(&path, None).await?;

    Ok(Value::Null)
}

pub fn task_mgmt_cli() -> CliCommandMap {

    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
        .completion_cb("repository", complete_repository);

    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
        .arg_param(&["upid"]);

    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
        .arg_param(&["upid"]);

    CliCommandMap::new()
        .insert("log", task_log_cmd_def)
        .insert("list", task_list_cmd_def)
        .insert("stop", task_stop_cmd_def)
}
@@ -86,7 +86,7 @@ pub fn datastore_commands() -> CommandLineInterface {
                 .completion_cb("name", config::datastore::complete_datastore_name)
                 .completion_cb("gc-schedule", config::datastore::complete_calendar_event)
                 .completion_cb("prune-schedule", config::datastore::complete_calendar_event)
         )
         .insert("remove",
             CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
                 .arg_param(&["name"])

src/bin/proxmox_backup_manager/disk.rs (new file, 353 lines)
@@ -0,0 +1,353 @@
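All of the disk, zpool and filesystem commands in this new file share one shape: set the node to "localhost", invoke the node API handler synchronously, and then block on the wait_for_local_worker() helper added to this binary earlier in the diff until the spawned worker finishes. A condensed sketch of that pattern, mirroring the initialize_disk handler:

    use anyhow::Error;
    use serde_json::Value;
    use proxmox::api::{ApiHandler, RpcEnvironment};
    use proxmox_backup::api2;

    async fn run_and_wait(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
        param["node"] = "localhost".into();

        let info = &api2::node::disks::API_METHOD_INITIALIZE_DISK;
        let result = match info.handler {
            ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
            _ => unreachable!(),
        };

        // The handler returns the UPID of the local worker it started.
        crate::wait_for_local_worker(result.as_str().unwrap()).await?;
        Ok(())
    }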
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
|
||||||
|
|
||||||
|
use proxmox_backup::tools::disks::{
|
||||||
|
FileSystemType,
|
||||||
|
SmartAttribute,
|
||||||
|
complete_disk_name,
|
||||||
|
};
|
||||||
|
|
||||||
|
use proxmox_backup::api2::node::disks::{
|
||||||
|
zfs::DISK_LIST_SCHEMA,
|
||||||
|
zfs::ZFS_ASHIFT_SCHEMA,
|
||||||
|
zfs::ZfsRaidLevel,
|
||||||
|
zfs::ZfsCompressionType,
|
||||||
|
};
|
||||||
|
|
||||||
|
use proxmox_backup::api2::{self, types::* };
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// Local disk list.
|
||||||
|
fn list_disks(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::API_METHOD_LIST_DISKS;
|
||||||
|
let mut data = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let render_wearout = |value: &Value, _record: &Value| -> Result<String, Error> {
|
||||||
|
match value.as_f64() {
|
||||||
|
Some(value) => Ok(format!("{:.2} %", if value <= 100.0 { 100.0 - value } else { 0.0 })),
|
||||||
|
None => Ok(String::from("-")),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let options = default_table_format_options()
|
||||||
|
.column(ColumnConfig::new("name"))
|
||||||
|
.column(ColumnConfig::new("used"))
|
||||||
|
.column(ColumnConfig::new("gpt"))
|
||||||
|
.column(ColumnConfig::new("disk-type"))
|
||||||
|
.column(ColumnConfig::new("size"))
|
||||||
|
.column(ColumnConfig::new("model"))
|
||||||
|
.column(ColumnConfig::new("wearout").renderer(render_wearout))
|
||||||
|
.column(ColumnConfig::new("status"))
|
||||||
|
;
|
||||||
|
|
||||||
|
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
disk: {
|
||||||
|
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "SMART attributes.",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: SmartAttribute,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// Show SMART attributes.
|
||||||
|
fn smart_attributes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::API_METHOD_SMART_STATUS;
|
||||||
|
let mut data = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut data = data["attributes"].take();
|
||||||
|
|
||||||
|
let options = default_table_format_options();
|
||||||
|
format_and_print_result_full(&mut data, API_METHOD_SMART_ATTRIBUTES.returns, &output_format, &options);
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
disk: {
|
||||||
|
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
uuid: {
|
||||||
|
description: "UUID for the GPT table.",
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
max_length: 36,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Initialize empty Disk with GPT
|
||||||
|
async fn initialize_disk(
|
||||||
|
mut param: Value,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::API_METHOD_INITIALIZE_DISK;
|
||||||
|
let result = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
devices: {
|
||||||
|
schema: DISK_LIST_SCHEMA,
|
||||||
|
},
|
||||||
|
raidlevel: {
|
||||||
|
type: ZfsRaidLevel,
|
||||||
|
},
|
||||||
|
ashift: {
|
||||||
|
schema: ZFS_ASHIFT_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
compression: {
|
||||||
|
type: ZfsCompressionType,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"add-datastore": {
|
||||||
|
description: "Configure a datastore using the zpool.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// create a zfs pool
|
||||||
|
async fn create_zpool(
|
||||||
|
mut param: Value,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::zfs::API_METHOD_CREATE_ZPOOL;
|
||||||
|
let result = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// Local zfs pools.
|
||||||
|
fn list_zpools(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::zfs::API_METHOD_LIST_ZPOOLS;
|
||||||
|
let mut data = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let render_usage = |value: &Value, record: &Value| -> Result<String, Error> {
|
||||||
|
let value = value.as_u64().unwrap_or(0);
|
||||||
|
let size = match record["size"].as_u64() {
|
||||||
|
Some(size) => size,
|
||||||
|
None => bail!("missing size property"),
|
||||||
|
};
|
||||||
|
if size == 0 {
|
||||||
|
bail!("got zero size");
|
||||||
|
}
|
||||||
|
Ok(format!("{:.2} %", (value as f64)/(size as f64)))
|
||||||
|
};
|
||||||
|
|
||||||
|
let options = default_table_format_options()
|
||||||
|
.column(ColumnConfig::new("name"))
|
||||||
|
.column(ColumnConfig::new("size"))
|
||||||
|
.column(ColumnConfig::new("alloc").right_align(true).renderer(render_usage))
|
||||||
|
.column(ColumnConfig::new("health"));
|
||||||
|
|
||||||
|
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn zpool_commands() -> CommandLineInterface {
|
||||||
|
|
||||||
|
let cmd_def = CliCommandMap::new()
|
||||||
|
.insert("list", CliCommand::new(&API_METHOD_LIST_ZPOOLS))
|
||||||
|
.insert("create",
|
||||||
|
CliCommand::new(&API_METHOD_CREATE_ZPOOL)
|
||||||
|
.arg_param(&["name"])
|
||||||
|
.completion_cb("devices", complete_disk_name) // fixme: comlete the list
|
||||||
|
);
|
||||||
|
|
||||||
|
cmd_def.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
"output-format": {
|
||||||
|
schema: OUTPUT_FORMAT,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
/// List systemd datastore mount units.
|
||||||
|
fn list_datastore_mounts(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let output_format = get_output_format(¶m);
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::directory::API_METHOD_LIST_DATASTORE_MOUNTS;
|
||||||
|
let mut data = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let options = default_table_format_options()
|
||||||
|
.column(ColumnConfig::new("path"))
|
||||||
|
.column(ColumnConfig::new("device"))
|
||||||
|
.column(ColumnConfig::new("filesystem"))
|
||||||
|
.column(ColumnConfig::new("options"));
|
||||||
|
|
||||||
|
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
disk: {
|
||||||
|
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"add-datastore": {
|
||||||
|
description: "Configure a datastore using the directory.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
filesystem: {
|
||||||
|
type: FileSystemType,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
|
||||||
|
async fn create_datastore_disk(
|
||||||
|
mut param: Value,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
param["node"] = "localhost".into();
|
||||||
|
|
||||||
|
let info = &api2::node::disks::directory::API_METHOD_CREATE_DATASTORE_DISK;
|
||||||
|
let result = match info.handler {
|
||||||
|
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||||
|
|
||||||
|
Ok(Value::Null)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn filesystem_commands() -> CommandLineInterface {
|
||||||
|
|
||||||
|
let cmd_def = CliCommandMap::new()
|
||||||
|
.insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORE_MOUNTS))
|
||||||
|
.insert("create",
|
||||||
|
CliCommand::new(&API_METHOD_CREATE_DATASTORE_DISK)
|
||||||
|
.arg_param(&["name"])
|
||||||
|
.completion_cb("disk", complete_disk_name)
|
||||||
|
);
|
||||||
|
|
||||||
|
cmd_def.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn disk_commands() -> CommandLineInterface {
|
||||||
|
|
||||||
|
let cmd_def = CliCommandMap::new()
|
||||||
|
.insert("list", CliCommand::new(&API_METHOD_LIST_DISKS))
|
||||||
|
.insert("smart-attributes",
|
||||||
|
CliCommand::new(&API_METHOD_SMART_ATTRIBUTES)
|
||||||
|
.arg_param(&["disk"])
|
||||||
|
.completion_cb("disk", complete_disk_name)
|
||||||
|
)
|
||||||
|
.insert("fs", filesystem_commands())
|
||||||
|
.insert("zpool", zpool_commands())
|
||||||
|
.insert("initialize",
|
||||||
|
CliCommand::new(&API_METHOD_INITIALIZE_DISK)
|
||||||
|
.arg_param(&["disk"])
|
||||||
|
.completion_cb("disk", complete_disk_name)
|
||||||
|
);
|
||||||
|
|
||||||
|
cmd_def.into()
|
||||||
|
}
|
@@ -14,3 +14,5 @@ mod sync;
 pub use sync::*;
 mod user;
 pub use user::*;
+mod disk;
+pub use disk::*;

src/bin/pxar.rs (800 lines changed)
@@ -1,191 +1,305 @@
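The pxar.rs changes that follow replace the hand-assembled ApiMethod/ObjectSchema constants with the #[api] attribute macro, and the command handlers now take typed arguments instead of digging them out of a raw serde_json::Value. A heavily condensed sketch of the new style, keeping only the `archive` parameter (the real function carries many more options):

    use anyhow::Error;
    use proxmox::api::api;

    // Replaces the former `const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(...)`
    // plus `&ObjectSchema::new("Extract an archive.", &sorted!([...]))` boilerplate:
    // the macro derives the input schema from the annotated signature.
    #[api(
        input: {
            properties: {
                archive: {
                    description: "Archive name.",
                },
            },
        },
    )]
    /// Extract an archive.
    fn extract_archive(archive: String) -> Result<(), Error> {
        // ... extraction logic elided; see the full diff below ...
        let _ = archive;
        Ok(())
    }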
extern crate proxmox_backup;
|
use std::collections::HashSet;
|
||||||
|
use std::ffi::OsStr;
|
||||||
|
use std::fs::OpenOptions;
|
||||||
|
use std::os::unix::fs::OpenOptionsExt;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
|
use futures::future::FutureExt;
|
||||||
|
use futures::select;
|
||||||
|
use tokio::signal::unix::{signal, SignalKind};
|
||||||
|
|
||||||
|
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||||
|
|
||||||
use proxmox::{sortable, identity};
|
|
||||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
|
||||||
use proxmox::api::schema::*;
|
|
||||||
use proxmox::api::cli::*;
|
use proxmox::api::cli::*;
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
use proxmox_backup::tools;
|
use proxmox_backup::tools;
|
||||||
|
use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags};
|
||||||
use serde_json::{Value};
|
|
||||||
|
|
||||||
use std::io::Write;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::fs::OpenOptions;
|
|
||||||
use std::ffi::OsStr;
|
|
||||||
use std::os::unix::fs::OpenOptionsExt;
|
|
||||||
use std::os::unix::io::AsRawFd;
|
|
||||||
use std::collections::HashSet;
|
|
||||||
|
|
||||||
use proxmox_backup::pxar;
|
|
||||||
|
|
||||||
fn dump_archive_from_reader<R: std::io::Read>(
|
|
||||||
reader: &mut R,
|
|
||||||
feature_flags: u64,
|
|
||||||
verbose: bool,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
|
||||||
|
|
||||||
let stdout = std::io::stdout();
|
|
||||||
let mut out = stdout.lock();
|
|
||||||
|
|
||||||
let mut path = PathBuf::new();
|
|
||||||
decoder.dump_entry(&mut path, verbose, &mut out)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn dump_archive(
|
|
||||||
param: Value,
|
|
||||||
_info: &ApiMethod,
|
|
||||||
_rpcenv: &mut dyn RpcEnvironment,
|
|
||||||
) -> Result<Value, Error> {
|
|
||||||
|
|
||||||
let archive = tools::required_string_param(¶m, "archive")?;
|
|
||||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
|
||||||
|
|
||||||
let feature_flags = pxar::flags::DEFAULT;
|
|
||||||
|
|
||||||
if archive == "-" {
|
|
||||||
let stdin = std::io::stdin();
|
|
||||||
let mut reader = stdin.lock();
|
|
||||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
|
||||||
} else {
|
|
||||||
if verbose { println!("PXAR dump: {}", archive); }
|
|
||||||
let file = std::fs::File::open(archive)?;
|
|
||||||
let mut reader = std::io::BufReader::new(file);
|
|
||||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Value::Null)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn extract_archive_from_reader<R: std::io::Read>(
|
fn extract_archive_from_reader<R: std::io::Read>(
|
||||||
reader: &mut R,
|
reader: &mut R,
|
||||||
target: &str,
|
target: &str,
|
||||||
feature_flags: u64,
|
feature_flags: Flags,
|
||||||
allow_existing_dirs: bool,
|
allow_existing_dirs: bool,
|
||||||
verbose: bool,
|
verbose: bool,
|
||||||
pattern: Option<Vec<pxar::MatchPattern>>
|
match_list: &[MatchEntry],
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
proxmox_backup::pxar::extract_archive(
|
||||||
decoder.set_callback(move |path| {
|
pxar::decoder::Decoder::from_std(reader)?,
|
||||||
if verbose {
|
Path::new(target),
|
||||||
println!("{:?}", path);
|
&match_list,
|
||||||
}
|
feature_flags,
|
||||||
Ok(())
|
allow_existing_dirs,
|
||||||
});
|
|path| {
|
||||||
decoder.set_allow_existing_dirs(allow_existing_dirs);
|
if verbose {
|
||||||
|
println!("{:?}", path);
|
||||||
let pattern = pattern.unwrap_or_else(Vec::new);
|
}
|
||||||
decoder.restore(Path::new(target), &pattern)?;
|
},
|
||||||
|
)
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
archive: {
|
||||||
|
description: "Archive name.",
|
||||||
|
},
|
||||||
|
pattern: {
|
||||||
|
description: "List of paths or pattern matching files to restore",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: String,
|
||||||
|
description: "Path or pattern matching files to restore.",
|
||||||
|
},
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
target: {
|
||||||
|
description: "Target directory",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
verbose: {
|
||||||
|
description: "Verbose output.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-xattrs": {
|
||||||
|
description: "Ignore extended file attributes.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-fcaps": {
|
||||||
|
description: "Ignore file capabilities.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-acls": {
|
||||||
|
description: "Ignore access control list entries.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"allow-existing-dirs": {
|
||||||
|
description: "Allows directories to already exist on restore.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"files-from": {
|
||||||
|
description: "File containing match pattern for files to restore.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"no-device-nodes": {
|
||||||
|
description: "Ignore device nodes.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-fifos": {
|
||||||
|
description: "Ignore fifos.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-sockets": {
|
||||||
|
description: "Ignore sockets.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Extract an archive.
|
||||||
fn extract_archive(
|
fn extract_archive(
|
||||||
param: Value,
|
archive: String,
|
||||||
_info: &ApiMethod,
|
pattern: Option<Vec<String>>,
|
||||||
_rpcenv: &mut dyn RpcEnvironment,
|
target: Option<String>,
|
||||||
) -> Result<Value, Error> {
|
verbose: bool,
|
||||||
|
no_xattrs: bool,
|
||||||
let archive = tools::required_string_param(¶m, "archive")?;
|
no_fcaps: bool,
|
||||||
let target = param["target"].as_str().unwrap_or(".");
|
no_acls: bool,
|
||||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
allow_existing_dirs: bool,
|
||||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
files_from: Option<String>,
|
||||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
no_device_nodes: bool,
|
||||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
no_fifos: bool,
|
||||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
no_sockets: bool,
|
||||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
) -> Result<(), Error> {
|
||||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
let mut feature_flags = Flags::DEFAULT;
|
||||||
let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
|
|
||||||
let files_from = param["files-from"].as_str();
|
|
||||||
let empty = Vec::new();
|
|
||||||
let arg_pattern = param["pattern"].as_array().unwrap_or(&empty);
|
|
||||||
|
|
||||||
let mut feature_flags = pxar::flags::DEFAULT;
|
|
||||||
if no_xattrs {
|
if no_xattrs {
|
||||||
feature_flags ^= pxar::flags::WITH_XATTRS;
|
feature_flags ^= Flags::WITH_XATTRS;
|
||||||
}
|
}
|
||||||
if no_fcaps {
|
if no_fcaps {
|
||||||
feature_flags ^= pxar::flags::WITH_FCAPS;
|
feature_flags ^= Flags::WITH_FCAPS;
|
||||||
}
|
}
|
||||||
if no_acls {
|
if no_acls {
|
||||||
feature_flags ^= pxar::flags::WITH_ACL;
|
feature_flags ^= Flags::WITH_ACL;
|
||||||
}
|
}
|
||||||
if no_device_nodes {
|
if no_device_nodes {
|
||||||
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
|
feature_flags ^= Flags::WITH_DEVICE_NODES;
|
||||||
}
|
}
|
||||||
if no_fifos {
|
if no_fifos {
|
||||||
feature_flags ^= pxar::flags::WITH_FIFOS;
|
feature_flags ^= Flags::WITH_FIFOS;
|
||||||
}
|
}
|
||||||
if no_sockets {
|
if no_sockets {
|
||||||
feature_flags ^= pxar::flags::WITH_SOCKETS;
|
feature_flags ^= Flags::WITH_SOCKETS;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut pattern_list = Vec::new();
|
let pattern = pattern.unwrap_or_else(Vec::new);
|
||||||
if let Some(filename) = files_from {
|
let target = target.as_ref().map_or_else(|| ".", String::as_str);
|
||||||
let dir = nix::dir::Dir::open("./", nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty())?;
|
|
||||||
if let Some((mut pattern, _, _)) = pxar::MatchPattern::from_file(dir.as_raw_fd(), filename)? {
|
let mut match_list = Vec::new();
|
||||||
pattern_list.append(&mut pattern);
|
if let Some(filename) = &files_from {
|
||||||
|
for line in proxmox_backup::tools::file_get_non_comment_lines(filename)? {
|
||||||
|
let line = line
|
||||||
|
.map_err(|err| format_err!("error reading {}: {}", filename, err))?;
|
||||||
|
match_list.push(
|
||||||
|
MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Include)
|
||||||
|
.map_err(|err| format_err!("bad pattern in file '{}': {}", filename, err))?,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for s in arg_pattern {
|
for entry in pattern {
|
||||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
match_list.push(
|
||||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include)
|
||||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
.map_err(|err| format_err!("error in pattern: {}", err))?,
|
||||||
pattern_list.push(p);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let pattern = if pattern_list.is_empty() {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(pattern_list)
|
|
||||||
};
|
|
||||||
|
|
||||||
if archive == "-" {
|
if archive == "-" {
|
||||||
let stdin = std::io::stdin();
|
let stdin = std::io::stdin();
|
||||||
let mut reader = stdin.lock();
|
let mut reader = stdin.lock();
|
||||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
extract_archive_from_reader(
|
||||||
|
&mut reader,
|
||||||
|
&target,
|
||||||
|
feature_flags,
|
||||||
|
allow_existing_dirs,
|
||||||
|
verbose,
|
||||||
|
&match_list,
|
||||||
|
)?;
|
||||||
} else {
|
} else {
|
||||||
if verbose { println!("PXAR extract: {}", archive); }
|
if verbose {
|
||||||
|
println!("PXAR extract: {}", archive);
|
||||||
|
}
|
||||||
let file = std::fs::File::open(archive)?;
|
let file = std::fs::File::open(archive)?;
|
||||||
let mut reader = std::io::BufReader::new(file);
|
let mut reader = std::io::BufReader::new(file);
|
||||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
extract_archive_from_reader(
|
||||||
|
&mut reader,
|
||||||
|
&target,
|
||||||
|
feature_flags,
|
||||||
|
allow_existing_dirs,
|
||||||
|
verbose,
|
||||||
|
&match_list,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Value::Null)
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
archive: {
|
||||||
|
description: "Archive name.",
|
||||||
|
},
|
||||||
|
source: {
|
||||||
|
description: "Source directory.",
|
||||||
|
},
|
||||||
|
verbose: {
|
||||||
|
description: "Verbose output.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-xattrs": {
|
||||||
|
description: "Ignore extended file attributes.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-fcaps": {
|
||||||
|
description: "Ignore file capabilities.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-acls": {
|
||||||
|
description: "Ignore access control list entries.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"all-file-systems": {
|
||||||
|
description: "Include mounted sudirs.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-device-nodes": {
|
||||||
|
description: "Ignore device nodes.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-fifos": {
|
||||||
|
description: "Ignore fifos.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
"no-sockets": {
|
||||||
|
description: "Ignore sockets.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
exclude: {
|
||||||
|
description: "List of paths or pattern matching files to exclude.",
|
||||||
|
optional: true,
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
description: "Path or pattern matching files to restore",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"entries-max": {
|
||||||
|
description: "Max number of entries loaded at once into memory",
|
||||||
|
optional: true,
|
||||||
|
default: ENCODER_MAX_ENTRIES as isize,
|
||||||
|
minimum: 0,
|
||||||
|
maximum: std::isize::MAX,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new .pxar archive.
|
||||||
fn create_archive(
|
fn create_archive(
|
||||||
param: Value,
|
archive: String,
|
||||||
_info: &ApiMethod,
|
source: String,
|
||||||
_rpcenv: &mut dyn RpcEnvironment,
|
verbose: bool,
|
||||||
) -> Result<Value, Error> {
|
no_xattrs: bool,
|
||||||
|
no_fcaps: bool,
|
||||||
|
no_acls: bool,
|
||||||
|
all_file_systems: bool,
|
||||||
|
no_device_nodes: bool,
|
||||||
|
no_fifos: bool,
|
||||||
|
no_sockets: bool,
|
||||||
|
exclude: Option<Vec<String>>,
|
||||||
|
entries_max: isize,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let pattern_list = {
|
||||||
|
let input = exclude.unwrap_or_else(Vec::new);
|
||||||
|
let mut pattern_list = Vec::with_capacity(input.len());
|
||||||
|
for entry in input {
|
||||||
|
pattern_list.push(
|
||||||
|
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
|
||||||
|
.map_err(|err| format_err!("error in exclude pattern: {}", err))?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
pattern_list
|
||||||
|
};
|
||||||
|
|
||||||
let archive = tools::required_string_param(¶m, "archive")?;
|
let device_set = if all_file_systems {
|
||||||
let source = tools::required_string_param(¶m, "source")?;
|
None
|
||||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
} else {
|
||||||
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
|
Some(HashSet::new())
|
||||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
};
|
||||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
|
||||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
|
||||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
|
||||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
|
||||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
|
||||||
let empty = Vec::new();
|
|
||||||
let exclude_pattern = param["exclude"].as_array().unwrap_or(&empty);
|
|
||||||
let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
|
|
||||||
|
|
||||||
let devices = if all_file_systems { None } else { Some(HashSet::new()) };
|
|
||||||
|
|
||||||
let source = PathBuf::from(source);
|
let source = PathBuf::from(source);
|
||||||
|
|
||||||
let mut dir = nix::dir::Dir::open(
|
let dir = nix::dir::Dir::open(
|
||||||
&source, nix::fcntl::OFlag::O_NOFOLLOW, nix::sys::stat::Mode::empty())?;
|
&source,
|
||||||
|
nix::fcntl::OFlag::O_NOFOLLOW,
|
||||||
|
nix::sys::stat::Mode::empty(),
|
||||||
|
)?;
|
||||||
|
|
||||||
let file = OpenOptions::new()
|
let file = OpenOptions::new()
|
||||||
.create_new(true)
|
.create_new(true)
|
||||||
@ -193,332 +307,150 @@ fn create_archive(
|
|||||||
.mode(0o640)
|
.mode(0o640)
|
||||||
.open(archive)?;
|
.open(archive)?;
|
||||||
|
|
||||||
let mut writer = std::io::BufWriter::with_capacity(1024*1024, file);
|
let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
|
||||||
let mut feature_flags = pxar::flags::DEFAULT;
|
let mut feature_flags = Flags::DEFAULT;
|
||||||
if no_xattrs {
|
if no_xattrs {
|
||||||
feature_flags ^= pxar::flags::WITH_XATTRS;
|
feature_flags ^= Flags::WITH_XATTRS;
|
||||||
}
|
}
|
||||||
if no_fcaps {
|
if no_fcaps {
|
||||||
feature_flags ^= pxar::flags::WITH_FCAPS;
|
feature_flags ^= Flags::WITH_FCAPS;
|
||||||
}
|
}
|
||||||
if no_acls {
|
if no_acls {
|
||||||
feature_flags ^= pxar::flags::WITH_ACL;
|
feature_flags ^= Flags::WITH_ACL;
|
||||||
}
|
}
|
||||||
if no_device_nodes {
|
if no_device_nodes {
|
||||||
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
|
feature_flags ^= Flags::WITH_DEVICE_NODES;
|
||||||
}
|
}
|
||||||
if no_fifos {
|
if no_fifos {
|
||||||
feature_flags ^= pxar::flags::WITH_FIFOS;
|
feature_flags ^= Flags::WITH_FIFOS;
|
||||||
}
|
}
|
||||||
if no_sockets {
|
if no_sockets {
|
||||||
feature_flags ^= pxar::flags::WITH_SOCKETS;
|
feature_flags ^= Flags::WITH_SOCKETS;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut pattern_list = Vec::new();
|
let writer = pxar::encoder::sync::StandardWriter::new(writer);
|
||||||
for s in exclude_pattern {
|
proxmox_backup::pxar::create_archive(
|
||||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
dir,
|
||||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
writer,
|
||||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
|
||||||
pattern_list.push(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
let catalog = None::<&mut pxar::catalog::DummyCatalogWriter>;
|
|
||||||
pxar::Encoder::encode(
|
|
||||||
source,
|
|
||||||
&mut dir,
|
|
||||||
&mut writer,
|
|
||||||
catalog,
|
|
||||||
devices,
|
|
||||||
verbose,
|
|
||||||
false,
|
|
||||||
feature_flags,
|
|
||||||
pattern_list,
|
pattern_list,
|
||||||
|
feature_flags,
|
||||||
|
device_set,
|
||||||
|
false,
|
||||||
|
|path| {
|
||||||
|
if verbose {
|
||||||
|
println!("{:?}", path);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
entries_max as usize,
|
entries_max as usize,
|
||||||
|
None,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
writer.flush()?;
|
Ok(())
|
||||||
|
|
||||||
Ok(Value::Null)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
archive: { description: "Archive name." },
|
||||||
|
mountpoint: { description: "Mountpoint for the file system." },
|
||||||
|
verbose: {
|
||||||
|
description: "Verbose output, running in the foreground (for debugging).",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
/// Mount the archive to the provided mountpoint via FUSE.
|
/// Mount the archive to the provided mountpoint via FUSE.
|
||||||
fn mount_archive(
|
async fn mount_archive(
|
||||||
param: Value,
|
archive: String,
|
||||||
_info: &ApiMethod,
|
mountpoint: String,
|
||||||
_rpcenv: &mut dyn RpcEnvironment,
|
verbose: bool,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<(), Error> {
|
||||||
let archive = tools::required_string_param(¶m, "archive")?;
|
let archive = Path::new(&archive);
|
||||||
let mountpoint = tools::required_string_param(¶m, "mountpoint")?;
|
let mountpoint = Path::new(&mountpoint);
|
||||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
|
||||||
let no_mt = param["no-mt"].as_bool().unwrap_or(false);
|
|
||||||
|
|
||||||
let archive = Path::new(archive);
|
|
||||||
let mountpoint = Path::new(mountpoint);
|
|
||||||
let options = OsStr::new("ro,default_permissions");
|
let options = OsStr::new("ro,default_permissions");
|
||||||
let mut session = pxar::fuse::Session::from_path(&archive, &options, verbose)
|
|
||||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
|
||||||
// Mount the session and deamonize if verbose is not set
|
|
||||||
session.mount(&mountpoint, !verbose)?;
|
|
||||||
session.run_loop(!no_mt)?;
|
|
||||||
|
|
||||||
Ok(Value::Null)
|
let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
|
||||||
|
.await
|
||||||
|
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||||
|
|
||||||
|
let mut interrupt = signal(SignalKind::interrupt())?;
|
||||||
|
|
||||||
|
select! {
|
||||||
|
res = session.fuse() => res?,
|
||||||
|
_ = interrupt.recv().fuse() => {
|
||||||
|
if verbose {
|
||||||
|
eprintln!("interrupted");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
-#[sortable]
-const API_METHOD_CREATE_ARCHIVE: ApiMethod = ApiMethod::new(
-    &ApiHandler::Sync(&create_archive),
-    &ObjectSchema::new(
-        "Create new .pxar archive.",
-        &sorted!([
-            ("archive", false, &StringSchema::new("Archive name").schema()),
-            ("source", false, &StringSchema::new("Source directory.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
-            ("no-xattrs", true, &BooleanSchema::new("Ignore extended file attributes.").default(false).schema()),
-            ("no-fcaps", true, &BooleanSchema::new("Ignore file capabilities.").default(false).schema()),
-            ("no-acls", true, &BooleanSchema::new("Ignore access control list entries.").default(false).schema()),
-            ("all-file-systems", true, &BooleanSchema::new("Include mounted sudirs.").default(false).schema()),
-            ("no-device-nodes", true, &BooleanSchema::new("Ignore device nodes.").default(false).schema()),
-            ("no-fifos", true, &BooleanSchema::new("Ignore fifos.").default(false).schema()),
-            ("no-sockets", true, &BooleanSchema::new("Ignore sockets.").default(false).schema()),
-            ("exclude", true, &ArraySchema::new(
-                "List of paths or pattern matching files to exclude.",
-                &StringSchema::new("Path or pattern matching files to restore.").schema()
-            ).schema()),
-            ("entries-max", true, &IntegerSchema::new("Max number of entries loaded at once into memory")
-                .default(pxar::ENCODER_MAX_ENTRIES as isize)
-                .minimum(0)
-                .maximum(std::isize::MAX)
-                .schema()),
-        ]),
-    )
-);
-
-#[sortable]
-const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(
-    &ApiHandler::Sync(&extract_archive),
-    &ObjectSchema::new(
-        "Extract an archive.",
-        &sorted!([
-            ("archive", false, &StringSchema::new("Archive name.").schema()),
-            ("pattern", true, &ArraySchema::new(
-                "List of paths or pattern matching files to restore",
-                &StringSchema::new("Path or pattern matching files to restore.").schema()
-            ).schema()),
-            ("target", true, &StringSchema::new("Target directory.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
-            ("no-xattrs", true, &BooleanSchema::new("Ignore extended file attributes.").default(false).schema()),
-            ("no-fcaps", true, &BooleanSchema::new("Ignore file capabilities.").default(false).schema()),
-            ("no-acls", true, &BooleanSchema::new("Ignore access control list entries.").default(false).schema()),
-            ("allow-existing-dirs", true, &BooleanSchema::new("Allows directories to already exist on restore.").default(false).schema()),
-            ("files-from", true, &StringSchema::new("Match pattern for files to restore.").schema()),
-            ("no-device-nodes", true, &BooleanSchema::new("Ignore device nodes.").default(false).schema()),
-            ("no-fifos", true, &BooleanSchema::new("Ignore fifos.").default(false).schema()),
-            ("no-sockets", true, &BooleanSchema::new("Ignore sockets.").default(false).schema()),
-        ]),
-    )
-);
-
-#[sortable]
-const API_METHOD_MOUNT_ARCHIVE: ApiMethod = ApiMethod::new(
-    &ApiHandler::Sync(&mount_archive),
-    &ObjectSchema::new(
-        "Mount the archive as filesystem via FUSE.",
-        &sorted!([
-            ("archive", false, &StringSchema::new("Archive name.").schema()),
-            ("mountpoint", false, &StringSchema::new("Mountpoint for the filesystem root.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output, keeps process running in foreground (for debugging).").default(false).schema()),
-            ("no-mt", true, &BooleanSchema::new("Run in single threaded mode (for debugging).").default(false).schema()),
-        ]),
-    )
-);
-
-#[sortable]
-const API_METHOD_DUMP_ARCHIVE: ApiMethod = ApiMethod::new(
-    &ApiHandler::Sync(&dump_archive),
-    &ObjectSchema::new(
-        "List the contents of an archive.",
-        &sorted!([
-            ("archive", false, &StringSchema::new("Archive name.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
-        ])
-    )
-);
+#[api(
+    input: {
+        properties: {
+            archive: {
+                description: "Archive name.",
+            },
+            verbose: {
+                description: "Verbose output.",
+                optional: true,
+                default: false,
+            },
+        },
+    },
+)]
+/// List the contents of an archive.
+fn dump_archive(archive: String, verbose: bool) -> Result<(), Error> {
+    for entry in pxar::decoder::Decoder::open(archive)? {
+        let entry = entry?;
+        if verbose {
+            println!("{}", format_single_line_entry(&entry));
+        } else {
+            println!("{:?}", entry.path());
+        }
+    }
+    Ok(())
+}
 fn main() {

     let cmd_def = CliCommandMap::new()
-        .insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
-            .arg_param(&["archive", "source"])
-            .completion_cb("archive", tools::complete_file_name)
-            .completion_cb("source", tools::complete_file_name)
-        )
-        .insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
-            .arg_param(&["archive", "target"])
-            .completion_cb("archive", tools::complete_file_name)
-            .completion_cb("target", tools::complete_file_name)
-            .completion_cb("files-from", tools::complete_file_name)
-        )
-        .insert("mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
-            .arg_param(&["archive", "mountpoint"])
-            .completion_cb("archive", tools::complete_file_name)
-            .completion_cb("mountpoint", tools::complete_file_name)
-        )
-        .insert("list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
-            .arg_param(&["archive"])
-            .completion_cb("archive", tools::complete_file_name)
-        );
+        .insert(
+            "create",
+            CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
+                .arg_param(&["archive", "source"])
+                .completion_cb("archive", tools::complete_file_name)
+                .completion_cb("source", tools::complete_file_name),
+        )
+        .insert(
+            "extract",
+            CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
+                .arg_param(&["archive", "target"])
+                .completion_cb("archive", tools::complete_file_name)
+                .completion_cb("target", tools::complete_file_name)
+                .completion_cb("files-from", tools::complete_file_name),
+        )
+        .insert(
+            "mount",
+            CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
+                .arg_param(&["archive", "mountpoint"])
+                .completion_cb("archive", tools::complete_file_name)
+                .completion_cb("mountpoint", tools::complete_file_name),
+        )
+        .insert(
+            "list",
+            CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
+                .arg_param(&["archive"])
+                .completion_cb("archive", tools::complete_file_name),
+        );

     let rpcenv = CliEnvironment::new();
-    run_cli_command(cmd_def, rpcenv, None);
+    run_cli_command(cmd_def, rpcenv, Some(|future| {
+        proxmox_backup::tools::runtime::main(future)
+    }));
 }
@@ -3,11 +3,11 @@
 //! This library implements the client side to access the backups
 //! server using https.

-pub mod pipe_to_stream;
 mod merge_known_chunks;
+pub mod pipe_to_stream;

 mod http_client;
 pub use http_client::*;

 mod task_log;
 pub use task_log::*;
@@ -24,9 +24,6 @@ pub use remote_chunk_reader::*;
 mod pxar_backup_stream;
 pub use pxar_backup_stream::*;

-mod pxar_decode_writer;
-pub use pxar_decode_writer::*;
-
 mod backup_repo;
 pub use backup_repo::*;

@@ -91,7 +91,7 @@ impl BackupReader {
         &self,
         file_name: &str,
         output: W,
-    ) -> Result<W, Error> {
+    ) -> Result<(), Error> {
         let path = "download";
         let param = json!({ "file-name": file_name });
         self.h2.download(path, Some(param), output).await
@@ -103,7 +103,7 @@ impl BackupReader {
     pub async fn speedtest<W: Write + Send>(
         &self,
         output: W,
-    ) -> Result<W, Error> {
+    ) -> Result<(), Error> {
         self.h2.download("speedtest", None, output).await
     }

@@ -112,7 +112,7 @@ impl BackupReader {
         &self,
         digest: &[u8; 32],
         output: W,
-    ) -> Result<W, Error> {
+    ) -> Result<(), Error> {
         let path = "chunk";
         let param = json!({ "digest": digest_to_hex(digest) });
         self.h2.download(path, Some(param), output).await
@@ -127,7 +127,8 @@ impl BackupReader {

         use std::convert::TryFrom;

-        let raw_data = self.download(MANIFEST_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
+        let mut raw_data = Vec::with_capacity(64 * 1024);
+        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
         let blob = DataBlob::from_raw(raw_data)?;
         blob.verify_crc()?;
         let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
@@ -146,13 +147,13 @@ impl BackupReader {
         name: &str,
     ) -> Result<DataBlobReader<File>, Error> {

-        let tmpfile = std::fs::OpenOptions::new()
+        let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
             .custom_flags(libc::O_TMPFILE)
             .open("/tmp")?;

-        let mut tmpfile = self.download(name, tmpfile).await?;
+        self.download(name, &mut tmpfile).await?;

         let (csum, size) = compute_file_csum(&mut tmpfile)?;
         manifest.verify_file(name, &csum, size)?;
@@ -172,13 +173,13 @@ impl BackupReader {
         name: &str,
     ) -> Result<DynamicIndexReader, Error> {

-        let tmpfile = std::fs::OpenOptions::new()
+        let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
             .custom_flags(libc::O_TMPFILE)
             .open("/tmp")?;

-        let tmpfile = self.download(name, tmpfile).await?;
+        self.download(name, &mut tmpfile).await?;

         let index = DynamicIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@@ -200,13 +201,13 @@ impl BackupReader {
         name: &str,
     ) -> Result<FixedIndexReader, Error> {

-        let tmpfile = std::fs::OpenOptions::new()
+        let mut tmpfile = std::fs::OpenOptions::new()
             .write(true)
             .read(true)
             .custom_flags(libc::O_TMPFILE)
             .open("/tmp")?;

-        let tmpfile = self.download(name, tmpfile).await?;
+        self.download(name, &mut tmpfile).await?;

         let index = FixedIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
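The Result<W, Error> to Result<(), Error> changes above all follow one pattern: the download helpers now borrow the output writer mutably instead of taking it by value and handing it back. A minimal sketch of the calling convention, with download_into as a hypothetical stand-in for the real download method:

    use std::io::Write;
    use anyhow::Error;

    // hypothetical stand-in for the real download method
    async fn download_into(output: &mut (dyn Write + Send)) -> Result<(), Error> {
        // the real code writes each received HTTP/2 body chunk into `output`
        output.write_all(b"chunk data")?;
        Ok(())
    }

    async fn fetch_manifest_bytes() -> Result<Vec<u8>, Error> {
        let mut raw_data = Vec::with_capacity(64 * 1024);
        // the caller keeps ownership of the buffer; nothing needs to be handed back
        download_into(&mut raw_data).await?;
        Ok(raw_data)
    }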
@@ -1,4 +1,5 @@
 use std::collections::HashSet;
+use std::os::unix::fs::OpenOptionsExt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

@@ -22,6 +23,7 @@ pub struct BackupWriter {
     h2: H2Client,
     abort: AbortHandle,
     verbose: bool,
+    crypt_config: Option<Arc<CryptConfig>>,
 }

 impl Drop for BackupWriter {
@@ -38,12 +40,13 @@ pub struct BackupStats {

 impl BackupWriter {

-    fn new(h2: H2Client, abort: AbortHandle, verbose: bool) -> Arc<Self> {
-        Arc::new(Self { h2, abort, verbose })
+    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
+        Arc::new(Self { h2, abort, crypt_config, verbose })
     }

     pub async fn start(
         client: HttpClient,
+        crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
         backup_type: &str,
         backup_id: &str,
@@ -64,7 +67,7 @@ impl BackupWriter {

         let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;

-        Ok(BackupWriter::new(h2, abort, debug))
+        Ok(BackupWriter::new(h2, abort, crypt_config, debug))
     }

     pub async fn get(
@@ -159,16 +162,19 @@ impl BackupWriter {
         &self,
         data: Vec<u8>,
         file_name: &str,
-        crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
-        sign_only: bool,
+        crypt_or_sign: Option<bool>,
     ) -> Result<BackupStats, Error> {

-        let blob = if let Some(ref crypt_config) = crypt_config {
-            if sign_only {
-                DataBlob::create_signed(&data, crypt_config, compress)?
+        let blob = if let Some(ref crypt_config) = self.crypt_config {
+            if let Some(encrypt) = crypt_or_sign {
+                if encrypt {
+                    DataBlob::encode(&data, Some(crypt_config), compress)?
+                } else {
+                    DataBlob::create_signed(&data, crypt_config, compress)?
+                }
             } else {
-                DataBlob::encode(&data, Some(crypt_config), compress)?
+                DataBlob::encode(&data, None, compress)?
             }
         } else {
             DataBlob::encode(&data, None, compress)?
@@ -187,8 +193,8 @@ impl BackupWriter {
         &self,
         src_path: P,
         file_name: &str,
-        crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
+        crypt_or_sign: Option<bool>,
     ) -> Result<BackupStats, Error> {

         let src_path = src_path.as_ref();
@@ -203,25 +209,16 @@ impl BackupWriter {
         .await
         .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

-        let blob = DataBlob::encode(&contents, crypt_config.as_ref().map(AsRef::as_ref), compress)?;
-        let raw_data = blob.into_inner();
-        let size = raw_data.len() as u64;
-        let csum = openssl::sha::sha256(&raw_data);
-        let param = json!({
-            "encoded-size": size,
-            "file-name": file_name,
-        });
-        self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
-        Ok(BackupStats { size, csum })
+        self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
     }

     pub async fn upload_stream(
         &self,
+        previous_manifest: Option<Arc<BackupManifest>>,
         archive_name: &str,
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         prefix: &str,
         fixed_size: Option<u64>,
-        crypt_config: Option<Arc<CryptConfig>>,
     ) -> Result<BackupStats, Error> {
         let known_chunks = Arc::new(Mutex::new(HashSet::new()));
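The crypt_or_sign parameter introduced above folds the old crypt_config/sign_only pair into one Option<bool>. A small illustrative sketch of the convention as the new if/else chain implements it (names here are illustrative, not the crate's API): None means plain, Some(false) means signed only, Some(true) means encrypted, and without a configured key the blob is always plain.

    /// Illustrative only: how a caller's intent maps to the blob encoding above.
    #[derive(Debug, PartialEq)]
    enum BlobMode {
        Plain,
        Signed,
        Encrypted,
    }

    fn blob_mode(has_crypt_config: bool, crypt_or_sign: Option<bool>) -> BlobMode {
        match (has_crypt_config, crypt_or_sign) {
            (true, Some(true)) => BlobMode::Encrypted, // encrypt with the configured key
            (true, Some(false)) => BlobMode::Signed,   // sign only
            _ => BlobMode::Plain,                      // no key configured, or None requested
        }
    }

    // e.g. blob_mode(true, None) == BlobMode::Plain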
@@ -233,7 +230,18 @@ impl BackupWriter {
         let index_path = format!("{}_index", prefix);
         let close_path = format!("{}_close", prefix);

-        self.download_chunk_list(&index_path, archive_name, known_chunks.clone()).await?;
+        if let Some(manifest) = previous_manifest {
+            // try, but ignore errors
+            match archive_type(archive_name) {
+                Ok(ArchiveType::FixedIndex) => {
+                    let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
+                }
+                Ok(ArchiveType::DynamicIndex) => {
+                    let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
+                }
+                _ => { /* do nothing */ }
+            }
+        }

         let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

@@ -244,7 +252,7 @@ impl BackupWriter {
             stream,
             &prefix,
             known_chunks.clone(),
-            crypt_config,
+            self.crypt_config.clone(),
             self.verbose,
         )
         .await?;
@@ -374,41 +382,93 @@ impl BackupWriter {
         (verify_queue_tx, verify_result_rx)
     }

-    pub async fn download_chunk_list(
+    pub async fn download_previous_fixed_index(
         &self,
-        path: &str,
         archive_name: &str,
+        manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    ) -> Result<(), Error> {
+    ) -> Result<FixedIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;

         let param = json!({ "archive-name": archive_name });
-        let request = H2Client::request_builder("localhost", "GET", path, Some(param), None).unwrap();
-
-        let h2request = self.h2.send_request(request, None).await?;
-        let resp = h2request.await?;
-
-        let status = resp.status();
-
-        if !status.is_success() {
-            H2Client::h2api_response(resp).await?; // raise error
-            unreachable!();
-        }
-
-        let mut body = resp.into_body();
-        let mut flow_control = body.flow_control().clone();
-
-        let mut stream = DigestListDecoder::new(body.map_err(Error::from));
-
-        while let Some(chunk) = stream.try_next().await? {
-            let _ = flow_control.release_capacity(chunk.len());
-            known_chunks.lock().unwrap().insert(chunk);
+        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+
+        let index = FixedIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+        // Note: do not use values stored in index (not trusted) - instead, computed them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
         }

         if self.verbose {
-            println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
+            println!("{}: known chunks list length is {}", archive_name, index.index_count());
         }

-        Ok(())
+        Ok(index)
+    }
+
+    pub async fn download_previous_dynamic_index(
+        &self,
+        archive_name: &str,
+        manifest: &BackupManifest,
+        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    ) -> Result<DynamicIndexReader, Error> {
+
+        let mut tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .read(true)
+            .custom_flags(libc::O_TMPFILE)
+            .open("/tmp")?;
+
+        let param = json!({ "archive-name": archive_name });
+        self.h2.download("previous", Some(param), &mut tmpfile).await?;
+
+        let index = DynamicIndexReader::new(tmpfile)
+            .map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
+        // Note: do not use values stored in index (not trusted) - instead, computed them again
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(archive_name, &csum, size)?;
+
+        // add index chunks to known chunks
+        let mut known_chunks = known_chunks.lock().unwrap();
+        for i in 0..index.index_count() {
+            known_chunks.insert(*index.index_digest(i).unwrap());
+        }
+
+        if self.verbose {
+            println!("{}: known chunks list length is {}", archive_name, index.index_count());
+        }
+
+        Ok(index)
+    }
+
+    /// Download backup manifest (index.json) of last backup
+    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
+
+        use std::convert::TryFrom;
+
+        let mut raw_data = Vec::with_capacity(64 * 1024);
+
+        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
+        self.h2.download("previous", Some(param), &mut raw_data).await?;
+
+        let blob = DataBlob::from_raw(raw_data)?;
+        blob.verify_crc()?;
+        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+        let json: Value = serde_json::from_slice(&data[..])?;
+        let manifest = BackupManifest::try_from(json)?;
+
+        Ok(manifest)
     }

     fn upload_chunk_info_stream(
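The two download_previous_* helpers above exist to seed the known-chunks set from the previous snapshot's verified index, so unchanged chunks can be skipped on upload. A minimal sketch of that seeding step only; index_digests is a hypothetical stand-in for iterating index.index_count() / index.index_digest(i) on the returned index reader.

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    fn seed_known_chunks(
        index_digests: &[[u8; 32]],
        known_chunks: &Arc<Mutex<HashSet<[u8; 32]>>>,
    ) {
        let mut known = known_chunks.lock().unwrap();
        for digest in index_digests {
            // the upload path later checks this set and skips chunks it already knows
            known.insert(*digest);
        }
    }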
@@ -466,7 +466,7 @@ impl HttpClient {
         &mut self,
         path: &str,
         output: &mut (dyn Write + Send),
     ) -> Result<(), Error> {
         let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();

         let client = self.client.clone();
@@ -707,7 +707,7 @@ impl H2Client {
         path: &str,
         param: Option<Value>,
         mut output: W,
-    ) -> Result<W, Error> {
+    ) -> Result<(), Error> {
         let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();

         let response_future = self.send_request(request, None).await?;
@@ -727,7 +727,7 @@ impl H2Client {
             output.write_all(&chunk)?;
         }

-        Ok(output)
+        Ok(())
     }

     pub async fn upload(
@@ -34,7 +34,7 @@ async fn pull_index_chunks<I: IndexFile>(
             continue;
         }
         //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
-        let chunk = chunk_reader.read_raw_chunk(&digest)?;
+        let chunk = chunk_reader.read_raw_chunk(&digest).await?;

         target.insert_chunk(&chunk, &digest)?;
     }
@@ -47,13 +47,13 @@ async fn download_manifest(
     filename: &std::path::Path,
 ) -> Result<std::fs::File, Error> {

-    let tmp_manifest_file = std::fs::OpenOptions::new()
+    let mut tmp_manifest_file = std::fs::OpenOptions::new()
         .write(true)
         .create(true)
         .read(true)
         .open(&filename)?;

-    let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
+    reader.download(MANIFEST_BLOB_NAME, &mut tmp_manifest_file).await?;

     tmp_manifest_file.seek(SeekFrom::Start(0))?;

@@ -77,13 +77,13 @@ async fn pull_single_archive(
     tmp_path.set_extension("tmp");

     worker.log(format!("sync archive {}", archive_name));
-    let tmpfile = std::fs::OpenOptions::new()
+    let mut tmpfile = std::fs::OpenOptions::new()
         .write(true)
         .create(true)
         .read(true)
         .open(&tmp_path)?;

-    let tmpfile = reader.download(archive_name, tmpfile).await?;
+    reader.download(archive_name, &mut tmpfile).await?;

     match archive_type(archive_name)? {
         ArchiveType::DynamicIndex => {
@@ -124,7 +124,7 @@ async fn try_client_log_download(
         .open(&tmp_path)?;

     // Note: be silent if there is no log - only log successful download
-    if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
+    if let Ok(()) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
         if let Err(err) = std::fs::rename(&tmp_path, &path) {
             bail!("Atomic rename file {:?} failed - {}", path, err);
         }
@@ -9,12 +9,12 @@ use std::thread;
 use anyhow::{format_err, Error};
 use futures::stream::Stream;
+use nix::dir::Dir;
 use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;
-use nix::dir::Dir;

-use crate::pxar;
+use pathpatterns::MatchEntry;

 use crate::backup::CatalogWriter;

 /// Stream implementation to encode and upload .pxar archives.
@@ -29,7 +28,6 @@ pub struct PxarBackupStream {
 }

 impl Drop for PxarBackupStream {
-
     fn drop(&mut self) {
         self.rx = None;
         self.child.take().unwrap().join().unwrap();
@@ -37,46 +36,49 @@ impl Drop for PxarBackupStream {
 }

 impl PxarBackupStream {

     pub fn new<W: Write + Send + 'static>(
-        mut dir: Dir,
-        path: PathBuf,
+        dir: Dir,
+        _path: PathBuf,
         device_set: Option<HashSet<u64>>,
-        verbose: bool,
+        _verbose: bool,
         skip_lost_and_found: bool,
         catalog: Arc<Mutex<CatalogWriter<W>>>,
-        exclude_pattern: Vec<pxar::MatchPattern>,
+        patterns: Vec<MatchEntry>,
         entries_max: usize,
     ) -> Result<Self, Error> {

         let (tx, rx) = std::sync::mpsc::sync_channel(10);

-        let buffer_size = 256*1024;
+        let buffer_size = 256 * 1024;

         let error = Arc::new(Mutex::new(None));
-        let error2 = error.clone();
-
-        let catalog = catalog.clone();
-        let child = std::thread::Builder::new().name("PxarBackupStream".to_string()).spawn(move || {
-            let mut guard = catalog.lock().unwrap();
-            let mut writer = std::io::BufWriter::with_capacity(buffer_size, crate::tools::StdChannelWriter::new(tx));
-
-            if let Err(err) = pxar::Encoder::encode(
-                path,
-                &mut dir,
-                &mut writer,
-                Some(&mut *guard),
-                device_set,
-                verbose,
-                skip_lost_and_found,
-                pxar::flags::DEFAULT,
-                exclude_pattern,
-                entries_max,
-            ) {
-                let mut error = error2.lock().unwrap();
-                *error = Some(err.to_string());
-            }
-        })?;
+        let child = std::thread::Builder::new()
+            .name("PxarBackupStream".to_string())
+            .spawn({
+                let error = Arc::clone(&error);
+                move || {
+                    let mut catalog_guard = catalog.lock().unwrap();
+                    let writer = std::io::BufWriter::with_capacity(
+                        buffer_size,
+                        crate::tools::StdChannelWriter::new(tx),
+                    );
+
+                    let writer = pxar::encoder::sync::StandardWriter::new(writer);
+                    if let Err(err) = crate::pxar::create_archive(
+                        dir,
+                        writer,
+                        patterns,
+                        crate::pxar::Flags::DEFAULT,
+                        device_set,
+                        skip_lost_and_found,
+                        |_| Ok(()),
+                        entries_max,
+                        Some(&mut *catalog_guard),
+                    ) {
+                        let mut error = error.lock().unwrap();
+                        *error = Some(err.to_string());
+                    }
+                }
+            })?;

         Ok(Self {
             rx: Some(rx),
@@ -91,23 +93,31 @@ impl PxarBackupStream {
         verbose: bool,
         skip_lost_and_found: bool,
         catalog: Arc<Mutex<CatalogWriter<W>>>,
-        exclude_pattern: Vec<pxar::MatchPattern>,
+        patterns: Vec<MatchEntry>,
         entries_max: usize,
     ) -> Result<Self, Error> {

         let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
         let path = std::path::PathBuf::from(dirname);

-        Self::new(dir, path, device_set, verbose, skip_lost_and_found, catalog, exclude_pattern, entries_max)
+        Self::new(
+            dir,
+            path,
+            device_set,
+            verbose,
+            skip_lost_and_found,
+            catalog,
+            patterns,
+            entries_max,
+        )
     }
 }

 impl Stream for PxarBackupStream {

     type Item = Result<Vec<u8>, Error>;

     fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
-        { // limit lock scope
+        {
+            // limit lock scope
             let error = self.error.lock().unwrap();
             if let Some(ref msg) = *error {
                 return Poll::Ready(Some(Err(format_err!("{}", msg))));
@@ -1,70 +0,0 @@
-use anyhow::{Error};
-
-use std::thread;
-use std::os::unix::io::FromRawFd;
-use std::path::{Path, PathBuf};
-use std::io::Write;
-
-use crate::pxar;
-
-/// Writer implementation to deccode a .pxar archive (download).
-
-pub struct PxarDecodeWriter {
-    pipe: Option<std::fs::File>,
-    child: Option<thread::JoinHandle<()>>,
-}
-
-impl Drop for PxarDecodeWriter {
-
-    fn drop(&mut self) {
-        drop(self.pipe.take());
-        self.child.take().unwrap().join().unwrap();
-    }
-}
-
-impl PxarDecodeWriter {
-
-    pub fn new(base: &Path, verbose: bool) -> Result<Self, Error> {
-        let (rx, tx) = nix::unistd::pipe()?;
-
-        let base = PathBuf::from(base);
-
-        let child = thread::spawn(move|| {
-            let mut reader = unsafe { std::fs::File::from_raw_fd(rx) };
-            let mut decoder = pxar::SequentialDecoder::new(&mut reader, pxar::flags::DEFAULT);
-            decoder.set_callback(move |path| {
-                if verbose {
-                    println!("{:?}", path);
-                }
-                Ok(())
-            });
-
-            if let Err(err) = decoder.restore(&base, &Vec::new()) {
-                eprintln!("pxar decode failed - {}", err);
-            }
-        });
-
-        let pipe = unsafe { std::fs::File::from_raw_fd(tx) };
-
-        Ok(Self { pipe: Some(pipe), child: Some(child) })
-    }
-}
-
-impl Write for PxarDecodeWriter {
-
-    fn write(&mut self, buffer: &[u8]) -> Result<usize, std::io::Error> {
-        let pipe = match self.pipe {
-            Some(ref mut pipe) => pipe,
-            None => unreachable!(),
-        };
-        pipe.write(buffer)
-    }
-
-    fn flush(&mut self) -> Result<(), std::io::Error> {
-        let pipe = match self.pipe {
-            Some(ref mut pipe) => pipe,
-            None => unreachable!(),
-        };
-        pipe.flush()
-    }
-}
@@ -1,10 +1,12 @@
+use std::future::Future;
 use std::collections::HashMap;
-use std::sync::Arc;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};

-use anyhow::{Error};
+use anyhow::Error;

 use super::BackupReader;
-use crate::backup::{ReadChunk, DataBlob, CryptConfig};
+use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
 use crate::tools::runtime::block_on;

 /// Read chunks from remote host using ``BackupReader``
@@ -12,11 +14,10 @@ pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
     cache_hint: HashMap<[u8; 32], usize>,
-    cache: HashMap<[u8; 32], Vec<u8>>,
+    cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
 }

 impl RemoteChunkReader {

     /// Create a new instance.
     ///
     /// Chunks listed in ``cache_hint`` are cached and kept in RAM.
@@ -25,50 +26,82 @@ impl RemoteChunkReader {
         crypt_config: Option<Arc<CryptConfig>>,
         cache_hint: HashMap<[u8; 32], usize>,
     ) -> Self {
-        Self { client, crypt_config, cache_hint, cache: HashMap::new() }
+        Self {
+            client,
+            crypt_config,
+            cache_hint,
+            cache: Mutex::new(HashMap::new()),
+        }
     }
-}
-
-impl ReadChunk for RemoteChunkReader {
-
-    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
-
-        let mut chunk_data = Vec::with_capacity(4*1024*1024);
-
-        //tokio::task::block_in_place(|| futures::executor::block_on(self.client.download_chunk(&digest, &mut chunk_data)))?;
-        block_on(async {
-            // download_chunk returns the writer back to us, but we need to return a 'static value
-            self.client
-                .download_chunk(&digest, &mut chunk_data)
-                .await
-                .map(drop)
-        })?;
+
+    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+        let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
+
+        self.client
+            .download_chunk(&digest, &mut chunk_data)
+            .await?;

         let chunk = DataBlob::from_raw(chunk_data)?;
         chunk.verify_crc()?;

         Ok(chunk)
     }
+}
+
+impl ReadChunk for RemoteChunkReader {
+    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+        block_on(Self::read_raw_chunk(self, digest))
+    }

-    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
-
-        if let Some(raw_data) = self.cache.get(digest) {
+    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
+        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
             return Ok(raw_data.to_vec());
         }

-        let chunk = self.read_raw_chunk(digest)?;
+        let chunk = ReadChunk::read_raw_chunk(self, digest)?;

         let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

         // fixme: verify digest?

         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
-            self.cache.insert(*digest, raw_data.to_vec());
+            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
         }

         Ok(raw_data)
     }
 }
+
+impl AsyncReadChunk for RemoteChunkReader {
+    fn read_raw_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
+        Box::pin(Self::read_raw_chunk(self, digest))
+    }
+
+    fn read_chunk<'a>(
+        &'a self,
+        digest: &'a [u8; 32],
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
+        Box::pin(async move {
+            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
+                return Ok(raw_data.to_vec());
+            }
+
+            let chunk = Self::read_raw_chunk(self, digest).await?;
+
+            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+            // fixme: verify digest?
+
+            let use_cache = self.cache_hint.contains_key(digest);
+            if use_cache {
+                (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
+            }
+
+            Ok(raw_data)
+        })
+    }
+}
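Moving the chunk cache behind a Mutex, as in the diff above, is what lets the reader expose &self methods (and implement the new async trait) while still memoizing downloaded chunks. A small, self-contained sketch of that interior-mutability pattern, illustrative only and not the crate's code:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct ChunkCache {
        cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
    }

    impl ChunkCache {
        // &self is enough: the Mutex supplies the mutability the old &mut self provided
        fn get_or_insert_with(&self, digest: [u8; 32], fetch: impl FnOnce() -> Vec<u8>) -> Vec<u8> {
            if let Some(data) = self.cache.lock().unwrap().get(&digest) {
                return data.clone();
            }
            let data = fetch();
            self.cache.lock().unwrap().insert(digest, data.clone());
            data
        }
    }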
@@ -190,7 +190,7 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
         "system" => {
             if components_len == 1 { return Ok(()); }
             match components[1] {
-                "log" | "status" | "tasks" | "time" => {
+                "disks" | "log" | "status" | "tasks" | "time" => {
                     if components_len == 2 { return Ok(()); }
                 }
                 "services" => { // /system/services/{service}
@@ -141,7 +141,7 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {

 pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error> {

-    let output = Command::new("/usr/bin/diff")
+    let output = Command::new("diff")
         .arg("-b")
         .arg("-u")
         .arg(filename)
@@ -165,10 +165,10 @@ pub fn assert_ifupdown2_installed() -> Result<(), Error> {

 pub fn network_reload() -> Result<(), Error> {

-    let output = Command::new("/sbin/ifreload")
+    let output = Command::new("ifreload")
         .arg("-a")
         .output()
-        .map_err(|err| format_err!("failed to execute '/sbin/ifreload' - {}", err))?;
+        .map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;

     crate::tools::command_output(output, None)
         .map_err(|err| format_err!("ifreload failed: {}", err))?;
@@ -1,229 +0,0 @@
-//! Helpers to generate a binary search tree stored in an array from a
-//! sorted array.
-//!
-//! Specifically, for any given sorted array 'input' permute the
-//! array so that the following rule holds:
-//!
-//! For each array item with index i, the item at 2i+1 is smaller and
-//! the item 2i+2 is larger.
-//!
-//! This structure permits efficient (meaning: O(log(n)) binary
-//! searches: start with item i=0 (i.e. the root of the BST), compare
-//! the value with the searched item, if smaller proceed at item
-//! 2i+1, if larger proceed at item 2i+2, and repeat, until either
-//! the item is found, or the indexes grow beyond the array size,
-//! which means the entry does not exist.
-//!
-//! Effectively this implements bisection, but instead of jumping
-//! around wildly in the array during a single search we only search
-//! with strictly monotonically increasing indexes.
-//!
-//! Algorithm is from casync (camakebst.c), simplified and optimized
-//! for rust. Permutation function originally by L. Bressel, 2017. We
-//! pass permutation info to user provided callback, which actually
-//! implements the data copy.
-//!
-//! The Wikipedia Artikel for [Binary
-//! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
-//! intro howto store binary trees using an array.
-
-use std::cmp::Ordering;
-
-#[allow(clippy::many_single_char_names)]
-fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
-    copy_func: &mut F,
-    // we work on input array input[o..o+n]
-    n: usize,
-    o: usize,
-    e: usize,
-    i: usize,
-) {
-    let p = 1 << e;
-
-    let t = p + (p>>1) - 1;
-
-    let m = if n > t {
-        // |...........p.............t....n........(2p)|
-        p - 1
-    } else {
-        // |...........p.....n.......t.............(2p)|
-        p - 1 - (t-n)
-    };
-
-    (copy_func)(o+m, i);
-
-    if m > 0 {
-        copy_binary_search_tree_inner(copy_func, m, o, e-1, i*2+1);
-    }
-
-    if (m + 1) < n {
-        copy_binary_search_tree_inner(copy_func, n-m-1, o+m+1, e-1, i*2+2);
-    }
-}
-
-/// This function calls the provided `copy_func()` with the permutation
-/// info.
-///
-/// ```
-/// # use proxmox_backup::pxar::copy_binary_search_tree;
-/// copy_binary_search_tree(5, |src, dest| {
-///     println!("Copy {} to {}", src, dest);
-/// });
-/// ```
-///
-/// This will produce the following output:
-///
-/// ```no-compile
-/// Copy 3 to 0
-/// Copy 1 to 1
-/// Copy 0 to 3
-/// Copy 2 to 4
-/// Copy 4 to 2
-/// ```
-///
-/// So this generates the following permutation: `[3,1,4,0,2]`.
-pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
-    n: usize,
-    mut copy_func: F,
-) {
-    if n == 0 { return };
-    let e = (64 - n.leading_zeros() - 1) as usize; // fast log2(n)
-
-    copy_binary_search_tree_inner(&mut copy_func, n, 0, e, 0);
-}
-
-/// This function searches for the index where the comparison by the provided
-/// `compare()` function returns `Ordering::Equal`.
-/// The order of the comparison matters (noncommutative) and should be search
-/// value compared to value at given index as shown in the examples.
-/// The parameter `skip_multiples` defines the number of matches to ignore while
-/// searching before returning the index in order to lookup duplicate entries in
-/// the tree.
-///
-/// ```
-/// # use proxmox_backup::pxar::{copy_binary_search_tree, search_binary_tree_by};
-/// let mut vals = vec![0,1,2,2,2,3,4,5,6,6,7,8,8,8];
-///
-/// let clone = vals.clone();
-/// copy_binary_search_tree(vals.len(), |s, d| {
-///     vals[d] = clone[s];
-/// });
-/// let should_be = vec![5,2,8,1,3,6,8,0,2,2,4,6,7,8];
-/// assert_eq!(vals, should_be);
-///
-/// let find = 8;
-/// let skip_multiples = 0;
-/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(2));
-///
-/// let find = 8;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(2, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(6));
-///
-/// let find = 8;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(6, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert_eq!(idx, Some(13));
-///
-/// let find = 5;
-/// let skip_multiples = 1;
-/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-///
-/// let find = 5;
-/// let skip_multiples = 0;
-/// // if start index is equal to the array length, `None` is returned.
-/// let idx = search_binary_tree_by(vals.len(), vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-///
-/// let find = 5;
-/// let skip_multiples = 0;
-/// // if start index is larger than length, `None` is returned.
-/// let idx = search_binary_tree_by(vals.len() + 1, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
-/// assert!(idx.is_none());
-/// ```
-pub fn search_binary_tree_by<F: Copy + Fn(usize) -> Ordering>(
-    start: usize,
-    size: usize,
-    skip_multiples: usize,
-    compare: F
-) -> Option<usize> {
-    if start >= size {
-        return None;
-    }
-
-    let mut skip = skip_multiples;
-    let cmp = compare(start);
-    if cmp == Ordering::Equal {
-        if skip == 0 {
-            // Found matching hash and want this one
-            return Some(start);
-        }
-        // Found matching hash, but we should skip the first `skip_multiple`,
-        // so continue search with reduced skip count.
-        skip -= 1;
-    }
-
-    if cmp == Ordering::Less || cmp == Ordering::Equal {
-        let res = search_binary_tree_by(2 * start + 1, size, skip, compare);
-        if res.is_some() {
-            return res;
-        }
-    }
-
-    if cmp == Ordering::Greater || cmp == Ordering::Equal {
-        let res = search_binary_tree_by(2 * start + 2, size, skip, compare);
-        if res.is_some() {
-            return res;
-        }
-    }
-
-    None
-}
-
-#[test]
-fn test_binary_search_tree() {
-
-    fn run_test(len: usize) -> Vec<usize> {
-
-        const MARKER: usize = 0xfffffff;
-        let mut output = vec![];
-        for _i in 0..len { output.push(MARKER); }
-        copy_binary_search_tree(len, |s, d| {
-            assert!(output[d] == MARKER);
-            output[d] = s;
-        });
-        if len < 32 { println!("GOT:{}:{:?}", len, output); }
-        for i in 0..len {
-            assert!(output[i] != MARKER);
-        }
-        output
-    }
-
-    assert!(run_test(0).len() == 0);
-    assert!(run_test(1) == [0]);
-    assert!(run_test(2) == [1,0]);
-    assert!(run_test(3) == [1,0,2]);
-    assert!(run_test(4) == [2,1,3,0]);
-    assert!(run_test(5) == [3,1,4,0,2]);
-    assert!(run_test(6) == [3,1,5,0,2,4]);
-    assert!(run_test(7) == [3,1,5,0,2,4,6]);
-    assert!(run_test(8) == [4,2,6,1,3,5,7,0]);
-    assert!(run_test(9) == [5,3,7,1,4,6,8,0,2]);
-    assert!(run_test(10) == [6,3,8,1,5,7,9,0,2,4]);
-    assert!(run_test(11) == [7,3,9,1,5,8,10,0,2,4,6]);
-    assert!(run_test(12) == [7,3,10,1,5,9,11,0,2,4,6,8]);
-    assert!(run_test(13) == [7,3,11,1,5,9,12,0,2,4,6,8,10]);
-    assert!(run_test(14) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12]);
-    assert!(run_test(15) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12,14]);
-    assert!(run_test(16) == [8,4,12,2,6,10,14,1,3,5,7,9,11,13,15,0]);
-    assert!(run_test(17) == [9,5,13,3,7,11,15,1,4,6,8,10,12,14,16,0,2]);
-
-    for len in 18..1000 {
-        run_test(len);
-    }
-}
src/pxar/create.rs — new file, 1006 lines (diff suppressed because it is too large)
@@ -1,365 +0,0 @@
-//! *pxar* format decoder for seekable files
-//!
-//! This module contain the code to decode *pxar* archive files.
-
-use std::convert::TryFrom;
-use std::ffi::{OsString, OsStr};
-use std::io::{Read, Seek, SeekFrom};
-use std::path::{Path, PathBuf};
-use std::os::unix::ffi::OsStrExt;
-
-use anyhow::{bail, format_err, Error};
-use libc;
-
-use super::binary_search_tree::search_binary_tree_by;
-use super::format_definition::*;
-use super::sequential_decoder::SequentialDecoder;
-use super::match_pattern::MatchPattern;
-
-use proxmox::tools::io::ReadExt;
-
-pub struct DirectoryEntry {
-    /// Points to the `PxarEntry` of the directory
-    start: u64,
-    /// Points past the goodbye table tail
-    end: u64,
-    /// Filename of entry
-    pub filename: OsString,
-    /// Entry (mode, permissions)
-    pub entry: PxarEntry,
-    /// Extended attributes
-    pub xattr: PxarAttributes,
-    /// Payload size
-    pub size: u64,
-    /// Target path for symbolic links
-    pub target: Option<PathBuf>,
-    /// Start offset of the payload if present.
-    pub payload_offset: Option<u64>,
-}
-
-/// Trait to create ReadSeek Decoder trait objects.
-trait ReadSeek: Read + Seek {}
-impl <R: Read + Seek> ReadSeek for R {}
-
-// This one needs Read+Seek
-pub struct Decoder {
-    inner: SequentialDecoder<Box<dyn ReadSeek + Send>>,
-    root_start: u64,
-    root_end: u64,
-}
-
-const HEADER_SIZE: u64 = std::mem::size_of::<PxarHeader>() as u64;
-const GOODBYE_ITEM_SIZE: u64 = std::mem::size_of::<PxarGoodbyeItem>() as u64;
-
-impl Decoder {
-    pub fn new<R: Read + Seek + Send + 'static>(mut reader: R) -> Result<Self, Error> {
-        let root_end = reader.seek(SeekFrom::End(0))?;
-        let boxed_reader: Box<dyn ReadSeek + 'static + Send> = Box::new(reader);
-        let inner = SequentialDecoder::new(boxed_reader, super::flags::DEFAULT);
-
-        Ok(Self { inner, root_start: 0, root_end })
-    }
-
-    pub fn set_callback<F: Fn(&Path) -> Result<(), Error> + Send + 'static>(&mut self, callback: F ) {
-        self.inner.set_callback(callback);
-    }
-
-    pub fn root(&mut self) -> Result<DirectoryEntry, Error> {
-        self.seek(SeekFrom::Start(0))?;
-        let header: PxarHeader = self.inner.read_item()?;
-        check_ca_header::<PxarEntry>(&header, PXAR_ENTRY)?;
-        let entry: PxarEntry = self.inner.read_item()?;
-        let (header, xattr) = self.inner.read_attributes()?;
-        let (size, payload_offset) = match header.htype {
-            PXAR_PAYLOAD => (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?)),
-            _ => (0, None),
-        };
-
-        Ok(DirectoryEntry {
-            start: self.root_start,
-            end: self.root_end,
-            filename: OsString::new(), // Empty
-            entry,
-            xattr,
-            size,
-            target: None,
-            payload_offset,
-        })
-    }
-
-    fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
-        let pos = self.inner.get_reader_mut().seek(pos)?;
-        Ok(pos)
-    }
-
-    pub(crate) fn root_end_offset(&self) -> u64 {
-        self.root_end
-    }
-
-    /// Restore the subarchive starting at `dir` to the provided target `path`.
-    ///
-    /// Only restore the content matched by the MatchPattern `pattern`.
-    /// An empty Vec `pattern` means restore all.
-    pub fn restore(&mut self, dir: &DirectoryEntry, path: &Path, pattern: &Vec<MatchPattern>) -> Result<(), Error> {
-        let start = dir.start;
-        self.seek(SeekFrom::Start(start))?;
-        self.inner.restore(path, pattern)?;
-
-        Ok(())
-    }
-
-    pub(crate) fn read_directory_entry(
-        &mut self,
-        start: u64,
-        end: u64,
-    ) -> Result<DirectoryEntry, Error> {
-        self.seek(SeekFrom::Start(start))?;
-
-        let head: PxarHeader = self.inner.read_item()?;
-
-        if head.htype != PXAR_FILENAME {
-            bail!("wrong filename header type for object [{}..{}]", start, end);
-        }
-
-        let entry_start = start + head.size;
-
-        let filename = self.inner.read_filename(head.size)?;
-
-        let head: PxarHeader = self.inner.read_item()?;
-        if head.htype == PXAR_FORMAT_HARDLINK {
-            let (_, offset) = self.inner.read_hardlink(head.size)?;
-            // TODO: Howto find correct end offset for hardlink target?
-            // This is a bit tricky since we cannot find correct end in an efficient
-            // way, on the other hand it doesn't really matter (for now) since target
-            // is never a directory and end is not used in such cases.
-            return self.read_directory_entry(start - offset, end);
-        }
-        check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
-        let entry: PxarEntry = self.inner.read_item()?;
-        let (header, xattr) = self.inner.read_attributes()?;
-        let (size, payload_offset, target) = match header.htype {
-            PXAR_PAYLOAD =>
-                (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?), None),
-            PXAR_SYMLINK =>
-                (header.size - HEADER_SIZE, None, Some(self.inner.read_link(header.size)?)),
-            _ => (0, None, None),
-        };
-
-        Ok(DirectoryEntry {
-            start: entry_start,
-            end,
-            filename,
-            entry,
-            xattr,
-            size,
-            target,
-            payload_offset,
-        })
-    }
-
-    /// Return the goodbye table based on the provided end offset.
-    ///
-    /// Get the goodbye table entries and the start and end offsets of the
-    /// items they reference.
-    /// If the start offset is provided, we use that to check the consistency of
-    /// the data, else the start offset calculated based on the goodbye tail is
-    /// used.
-    pub(crate) fn goodbye_table(
-        &mut self,
-        start: Option<u64>,
-        end: u64,
-    ) -> Result<Vec<(PxarGoodbyeItem, u64, u64)>, Error> {
-        self.seek(SeekFrom::Start(end - GOODBYE_ITEM_SIZE))?;
-
-        let tail: PxarGoodbyeItem = self.inner.read_item()?;
-        if tail.hash != PXAR_GOODBYE_TAIL_MARKER {
-            bail!("missing goodbye tail marker for object at offset {}", end);
-        }
-
-        // If the start offset was provided, we use and check based on that.
-        // If not, we rely on the offset calculated from the goodbye table entry.
|
|
||||||
let start = start.unwrap_or(end - tail.offset - tail.size);
|
|
||||||
let goodbye_table_size = tail.size;
|
|
||||||
if goodbye_table_size < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
|
|
||||||
bail!("short goodbye table size for object [{}..{}]", start, end);
|
|
||||||
}
|
|
||||||
|
|
||||||
let goodbye_inner_size = goodbye_table_size - HEADER_SIZE - GOODBYE_ITEM_SIZE;
|
|
||||||
if (goodbye_inner_size % GOODBYE_ITEM_SIZE) != 0 {
|
|
||||||
bail!(
|
|
||||||
"wrong goodbye inner table size for entry [{}..{}]",
|
|
||||||
start,
|
|
||||||
end
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let goodbye_start = end - goodbye_table_size;
|
|
||||||
if tail.offset != (goodbye_start - start) {
|
|
||||||
bail!(
|
|
||||||
"wrong offset in goodbye tail marker for entry [{}..{}]",
|
|
||||||
start,
|
|
||||||
end
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.seek(SeekFrom::Start(goodbye_start))?;
|
|
||||||
let head: PxarHeader = self.inner.read_item()?;
|
|
||||||
if head.htype != PXAR_GOODBYE {
|
|
||||||
bail!(
|
|
||||||
"wrong goodbye table header type for entry [{}..{}]",
|
|
||||||
start,
|
|
||||||
end
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if head.size != goodbye_table_size {
|
|
||||||
bail!("wrong goodbye table size for entry [{}..{}]", start, end);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut gb_entries = Vec::new();
|
|
||||||
for i in 0..goodbye_inner_size / GOODBYE_ITEM_SIZE {
|
|
||||||
let item: PxarGoodbyeItem = self.inner.read_item()?;
|
|
||||||
if item.offset > (goodbye_start - start) {
|
|
||||||
bail!(
|
|
||||||
"goodbye entry {} offset out of range [{}..{}] {} {} {}",
|
|
||||||
i,
|
|
||||||
start,
|
|
||||||
end,
|
|
||||||
item.offset,
|
|
||||||
goodbye_start,
|
|
||||||
start
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let item_start = goodbye_start - item.offset;
|
|
||||||
let item_end = item_start + item.size;
|
|
||||||
if item_end > goodbye_start {
|
|
||||||
bail!("goodbye entry {} end out of range [{}..{}]", i, start, end);
|
|
||||||
}
|
|
||||||
gb_entries.push((item, item_start, item_end));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(gb_entries)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn list_dir(&mut self, dir: &DirectoryEntry) -> Result<Vec<DirectoryEntry>, Error> {
|
|
||||||
let start = dir.start;
|
|
||||||
let end = dir.end;
|
|
||||||
|
|
||||||
//println!("list_dir1: {} {}", start, end);
|
|
||||||
|
|
||||||
if (end - start) < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
|
|
||||||
bail!("detected short object [{}..{}]", start, end);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut result = vec![];
|
|
||||||
let goodbye_table = self.goodbye_table(Some(start), end)?;
|
|
||||||
for (_, item_start, item_end) in goodbye_table {
|
|
||||||
let entry = self.read_directory_entry(item_start, item_end)?;
|
|
||||||
//println!("ENTRY: {} {} {:?}", item_start, item_end, entry.filename);
|
|
||||||
result.push(entry);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn print_filenames<W: std::io::Write>(
|
|
||||||
&mut self,
|
|
||||||
output: &mut W,
|
|
||||||
prefix: &mut PathBuf,
|
|
||||||
dir: &DirectoryEntry,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut list = self.list_dir(dir)?;
|
|
||||||
|
|
||||||
list.sort_unstable_by(|a, b| a.filename.cmp(&b.filename));
|
|
||||||
|
|
||||||
for item in &list {
|
|
||||||
prefix.push(item.filename.clone());
|
|
||||||
|
|
||||||
let mode = item.entry.mode as u32;
|
|
||||||
|
|
||||||
let ifmt = mode & libc::S_IFMT;
|
|
||||||
|
|
||||||
writeln!(output, "{:?}", prefix)?;
|
|
||||||
|
|
||||||
match ifmt {
|
|
||||||
libc::S_IFDIR => self.print_filenames(output, prefix, item)?,
|
|
||||||
libc::S_IFREG | libc::S_IFLNK | libc::S_IFBLK | libc::S_IFCHR => {}
|
|
||||||
_ => bail!("unknown item mode/type for {:?}", prefix),
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix.pop();
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Lookup the item identified by `filename` in the provided `DirectoryEntry`.
|
|
||||||
///
|
|
||||||
/// Calculates the hash of the filename and searches for matching entries in
|
|
||||||
/// the goodbye table of the provided `DirectoryEntry`.
|
|
||||||
/// If found, also the filename is compared to avoid hash collision.
|
|
||||||
/// If the filename does not match, the search resumes with the next entry in
|
|
||||||
/// the goodbye table.
|
|
||||||
/// If there is no entry with matching `filename`, `Ok(None)` is returned.
|
|
||||||
pub fn lookup(
|
|
||||||
&mut self,
|
|
||||||
dir: &DirectoryEntry,
|
|
||||||
filename: &OsStr,
|
|
||||||
) -> Result<Option<DirectoryEntry>, Error> {
|
|
||||||
let gbt = self.goodbye_table(Some(dir.start), dir.end)?;
|
|
||||||
let hash = compute_goodbye_hash(filename.as_bytes());
|
|
||||||
|
|
||||||
let mut start_idx = 0;
|
|
||||||
let mut skip_multiple = 0;
|
|
||||||
loop {
|
|
||||||
// Search for the next goodbye entry with matching hash.
|
|
||||||
let idx = search_binary_tree_by(
|
|
||||||
start_idx,
|
|
||||||
gbt.len(),
|
|
||||||
skip_multiple,
|
|
||||||
|idx| hash.cmp(&gbt[idx].0.hash),
|
|
||||||
);
|
|
||||||
let (_item, start, end) = match idx {
|
|
||||||
Some(idx) => &gbt[idx],
|
|
||||||
None => return Ok(None),
|
|
||||||
};
|
|
||||||
|
|
||||||
let entry = self.read_directory_entry(*start, *end)?;
|
|
||||||
|
|
||||||
// Possible hash collision, need to check if the found entry is indeed
|
|
||||||
// the filename to lookup.
|
|
||||||
if entry.filename == filename {
|
|
||||||
return Ok(Some(entry));
|
|
||||||
}
|
|
||||||
// Hash collision, check the next entry in the goodbye table by starting
|
|
||||||
// from given index but skipping one more match (so hash at index itself).
|
|
||||||
start_idx = idx.unwrap();
|
|
||||||
skip_multiple = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read the payload of the file given by `entry`.
|
|
||||||
///
|
|
||||||
/// This will read a files payload as raw bytes starting from `offset` after
|
|
||||||
/// the payload marker, reading `size` bytes.
|
|
||||||
/// If the payload from `offset` to EOF is smaller than `size` bytes, the
|
|
||||||
/// buffer with reduced size is returned.
|
|
||||||
/// If `offset` is larger than the payload size of the `DirectoryEntry`, an
|
|
||||||
/// empty buffer is returned.
|
|
||||||
pub fn read(&mut self, entry: &DirectoryEntry, size: usize, offset: u64) -> Result<Vec<u8>, Error> {
|
|
||||||
let start_offset = entry.payload_offset
|
|
||||||
.ok_or_else(|| format_err!("entry has no payload offset"))?;
|
|
||||||
if offset >= entry.size {
|
|
||||||
return Ok(Vec::new());
|
|
||||||
}
|
|
||||||
let len = if u64::try_from(size)? > entry.size {
|
|
||||||
usize::try_from(entry.size)?
|
|
||||||
} else {
|
|
||||||
size
|
|
||||||
};
|
|
||||||
self.seek(SeekFrom::Start(start_offset + offset))?;
|
|
||||||
let data = self.inner.get_reader_mut().read_exact_allocated(len)?;
|
|
||||||
|
|
||||||
Ok(data)
|
|
||||||
}
|
|
||||||
}
|
|
@ -2,117 +2,149 @@ use std::ffi::{OsStr, OsString};
|
|||||||
use std::os::unix::io::{AsRawFd, RawFd};
|
use std::os::unix::io::{AsRawFd, RawFd};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use nix::errno::Errno;
|
use nix::dir::Dir;
|
||||||
use nix::fcntl::OFlag;
|
use nix::fcntl::OFlag;
|
||||||
use nix::sys::stat::Mode;
|
use nix::sys::stat::{mkdirat, Mode};
|
||||||
use nix::NixPath;
|
|
||||||
|
|
||||||
use super::format_definition::{PxarAttributes, PxarEntry};
|
use proxmox::sys::error::SysError;
|
||||||
|
use pxar::Metadata;
|
||||||
|
|
||||||
|
use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};
|
||||||
|
|
||||||
pub struct PxarDir {
|
pub struct PxarDir {
|
||||||
pub filename: OsString,
|
file_name: OsString,
|
||||||
pub entry: PxarEntry,
|
metadata: Metadata,
|
||||||
pub attr: PxarAttributes,
|
dir: Option<Dir>,
|
||||||
pub dir: Option<nix::dir::Dir>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct PxarDirStack {
|
|
||||||
root: RawFd,
|
|
||||||
data: Vec<PxarDir>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PxarDir {
|
impl PxarDir {
|
||||||
pub fn new(filename: &OsStr, entry: PxarEntry, attr: PxarAttributes) -> Self {
|
pub fn new(file_name: OsString, metadata: Metadata) -> Self {
|
||||||
Self {
|
Self {
|
||||||
filename: filename.to_os_string(),
|
file_name,
|
||||||
entry,
|
metadata,
|
||||||
attr,
|
|
||||||
dir: None,
|
dir: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn create_dir(&self, parent: RawFd, create_new: bool) -> Result<nix::dir::Dir, nix::Error> {
|
pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
|
||||||
let res = self
|
Self {
|
||||||
.filename
|
file_name: OsString::from("."),
|
||||||
.with_nix_path(|cstr| unsafe { libc::mkdirat(parent, cstr.as_ptr(), libc::S_IRWXU) })?;
|
metadata,
|
||||||
|
dir: Some(dir),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
match Errno::result(res) {
|
fn create_dir(&mut self, parent: RawFd, allow_existing_dirs: bool) -> Result<RawFd, Error> {
|
||||||
Ok(_) => {}
|
match mkdirat(
|
||||||
|
parent,
|
||||||
|
self.file_name.as_os_str(),
|
||||||
|
perms_from_metadata(&self.metadata)?,
|
||||||
|
) {
|
||||||
|
Ok(()) => (),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
if err == nix::Error::Sys(nix::errno::Errno::EEXIST) {
|
if !(allow_existing_dirs && err.already_exists()) {
|
||||||
if create_new {
|
return Err(err.into());
|
||||||
return Err(err);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return Err(err);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let dir = nix::dir::Dir::openat(
|
self.open_dir(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open_dir(&mut self, parent: RawFd) -> Result<RawFd, Error> {
|
||||||
|
let dir = Dir::openat(
|
||||||
parent,
|
parent,
|
||||||
self.filename.as_os_str(),
|
self.file_name.as_os_str(),
|
||||||
OFlag::O_DIRECTORY,
|
OFlag::O_DIRECTORY,
|
||||||
Mode::empty(),
|
Mode::empty(),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
Ok(dir)
|
let fd = dir.as_raw_fd();
|
||||||
|
self.dir = Some(dir);
|
||||||
|
|
||||||
|
Ok(fd)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn try_as_raw_fd(&self) -> Option<RawFd> {
|
||||||
|
self.dir.as_ref().map(AsRawFd::as_raw_fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn metadata(&self) -> &Metadata {
|
||||||
|
&self.metadata
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn file_name(&self) -> &OsStr {
|
||||||
|
&self.file_name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PxarDirStack {
|
||||||
|
dirs: Vec<PxarDir>,
|
||||||
|
path: PathBuf,
|
||||||
|
created: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PxarDirStack {
|
impl PxarDirStack {
|
||||||
pub fn new(parent: RawFd) -> Self {
|
pub fn new(root: Dir, metadata: Metadata) -> Self {
|
||||||
Self {
|
Self {
|
||||||
root: parent,
|
dirs: vec![PxarDir::with_dir(root, metadata)],
|
||||||
data: Vec::new(),
|
path: PathBuf::from("/"),
|
||||||
|
created: 1, // the root directory exists
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn push(&mut self, dir: PxarDir) {
|
pub fn is_empty(&self) -> bool {
|
||||||
self.data.push(dir);
|
self.dirs.is_empty()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn pop(&mut self) -> Option<PxarDir> {
|
pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
|
||||||
self.data.pop()
|
assert_single_path_component(&file_name)?;
|
||||||
|
self.path.push(&file_name);
|
||||||
|
self.dirs.push(PxarDir::new(file_name, metadata));
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn as_path_buf(&self) -> PathBuf {
|
pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
|
||||||
let path: PathBuf = self.data.iter().map(|d| d.filename.clone()).collect();
|
let out = self.dirs.pop();
|
||||||
path
|
if !self.path.pop() {
|
||||||
}
|
if self.path.as_os_str() == "/" {
|
||||||
|
// we just finished the root directory, make sure this can only happen once:
|
||||||
pub fn last(&self) -> Option<&PxarDir> {
|
self.path = PathBuf::new();
|
||||||
self.data.last()
|
} else {
|
||||||
}
|
bail!("lost track of path");
|
||||||
|
|
||||||
pub fn last_mut(&mut self) -> Option<&mut PxarDir> {
|
|
||||||
self.data.last_mut()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn last_dir_fd(&self) -> Option<RawFd> {
|
|
||||||
let last_dir = self.data.last()?;
|
|
||||||
match &last_dir.dir {
|
|
||||||
Some(d) => Some(d.as_raw_fd()),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn create_all_dirs(&mut self, create_new: bool) -> Result<RawFd, Error> {
|
|
||||||
let mut current_fd = self.root;
|
|
||||||
for d in &mut self.data {
|
|
||||||
match &d.dir {
|
|
||||||
Some(dir) => current_fd = dir.as_raw_fd(),
|
|
||||||
None => {
|
|
||||||
let dir = d
|
|
||||||
.create_dir(current_fd, create_new)
|
|
||||||
.map_err(|err| format_err!("create dir failed - {}", err))?;
|
|
||||||
current_fd = dir.as_raw_fd();
|
|
||||||
d.dir = Some(dir);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
self.created = self.created.min(self.dirs.len());
|
||||||
|
Ok(out)
|
||||||
|
}
|
||||||
|
|
||||||
Ok(current_fd)
|
pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<RawFd, Error> {
|
||||||
|
// should not be possible given the way we use it:
|
||||||
|
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
|
||||||
|
|
||||||
|
let mut fd = self.dirs[self.created - 1]
|
||||||
|
.try_as_raw_fd()
|
||||||
|
.ok_or_else(|| format_err!("lost track of directory file descriptors"))?;
|
||||||
|
while self.created < self.dirs.len() {
|
||||||
|
fd = self.dirs[self.created].create_dir(fd, allow_existing_dirs)?;
|
||||||
|
self.created += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(fd)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
|
||||||
|
let _: RawFd = self.last_dir_fd(allow_existing_dirs)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn root_dir_fd(&self) -> Result<RawFd, Error> {
|
||||||
|
// should not be possible given the way we use it:
|
||||||
|
assert!(!self.dirs.is_empty(), "PxarDirStack underrun");
|
||||||
|
|
||||||
|
self.dirs[0]
|
||||||
|
.try_as_raw_fd()
|
||||||
|
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
1332
src/pxar/encoder.rs
1332
src/pxar/encoder.rs
File diff suppressed because it is too large
Load Diff
358
src/pxar/extract.rs
Normal file
358
src/pxar/extract.rs
Normal file
@ -0,0 +1,358 @@
|
|||||||
|
//! Code for extraction of pxar contents onto the file system.
|
||||||
|
|
||||||
|
use std::convert::TryFrom;
|
||||||
|
use std::ffi::{CStr, CString, OsStr, OsString};
|
||||||
|
use std::io;
|
||||||
|
use std::os::unix::ffi::OsStrExt;
|
||||||
|
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||||
|
use std::path::Path;
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
|
use nix::dir::Dir;
|
||||||
|
use nix::fcntl::OFlag;
|
||||||
|
use nix::sys::stat::Mode;
|
||||||
|
|
||||||
|
use pathpatterns::{MatchEntry, MatchList, MatchType};
|
||||||
|
use pxar::format::Device;
|
||||||
|
use pxar::Metadata;
|
||||||
|
|
||||||
|
use proxmox::c_result;
|
||||||
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
|
|
||||||
|
use crate::pxar::dir_stack::PxarDirStack;
|
||||||
|
use crate::pxar::Flags;
|
||||||
|
use crate::pxar::metadata;
|
||||||
|
|
||||||
|
pub fn extract_archive<T, F>(
|
||||||
|
mut decoder: pxar::decoder::Decoder<T>,
|
||||||
|
destination: &Path,
|
||||||
|
match_list: &[MatchEntry],
|
||||||
|
feature_flags: Flags,
|
||||||
|
allow_existing_dirs: bool,
|
||||||
|
mut callback: F,
|
||||||
|
) -> Result<(), Error>
|
||||||
|
where
|
||||||
|
T: pxar::decoder::SeqRead,
|
||||||
|
F: FnMut(&Path),
|
||||||
|
{
|
||||||
|
// we use this to keep track of our directory-traversal
|
||||||
|
decoder.enable_goodbye_entries(true);
|
||||||
|
|
||||||
|
let root = decoder
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| format_err!("found empty pxar archive"))?
|
||||||
|
.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
|
||||||
|
|
||||||
|
if !root.is_dir() {
|
||||||
|
bail!("pxar archive does not start with a directory entry!");
|
||||||
|
}
|
||||||
|
|
||||||
|
create_path(
|
||||||
|
&destination,
|
||||||
|
None,
|
||||||
|
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
|
||||||
|
)
|
||||||
|
.map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
|
||||||
|
|
||||||
|
let dir = Dir::open(
|
||||||
|
destination,
|
||||||
|
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
|
||||||
|
Mode::empty(),
|
||||||
|
)
|
||||||
|
.map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
|
||||||
|
|
||||||
|
let mut extractor = Extractor::new(
|
||||||
|
dir,
|
||||||
|
root.metadata().clone(),
|
||||||
|
allow_existing_dirs,
|
||||||
|
feature_flags,
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut match_stack = Vec::new();
|
||||||
|
let mut current_match = true;
|
||||||
|
while let Some(entry) = decoder.next() {
|
||||||
|
use pxar::EntryKind;
|
||||||
|
|
||||||
|
let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
|
||||||
|
|
||||||
|
let file_name_os = entry.file_name();
|
||||||
|
|
||||||
|
// safety check: a file entry in an archive must never contain slashes:
|
||||||
|
if file_name_os.as_bytes().contains(&b'/') {
|
||||||
|
bail!("archive file entry contains slashes, which is invalid and a security concern");
|
||||||
|
}
|
||||||
|
|
||||||
|
let file_name = CString::new(file_name_os.as_bytes())
|
||||||
|
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
|
||||||
|
|
||||||
|
let metadata = entry.metadata();
|
||||||
|
|
||||||
|
let match_result = match_list.matches(
|
||||||
|
entry.path().as_os_str().as_bytes(),
|
||||||
|
Some(metadata.file_type() as u32),
|
||||||
|
);
|
||||||
|
|
||||||
|
let did_match = match match_result {
|
||||||
|
Some(MatchType::Include) => true,
|
||||||
|
Some(MatchType::Exclude) => false,
|
||||||
|
None => current_match,
|
||||||
|
};
|
||||||
|
match (did_match, entry.kind()) {
|
||||||
|
(_, EntryKind::Directory) => {
|
||||||
|
callback(entry.path());
|
||||||
|
|
||||||
|
let create = current_match && match_result != Some(MatchType::Exclude);
|
||||||
|
extractor.enter_directory(file_name_os.to_owned(), metadata.clone(), create)?;
|
||||||
|
|
||||||
|
// We're starting a new directory, push our old matching state and replace it with
|
||||||
|
// our new one:
|
||||||
|
match_stack.push(current_match);
|
||||||
|
current_match = did_match;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
(_, EntryKind::GoodbyeTable) => {
|
||||||
|
// go up a directory
|
||||||
|
extractor
|
||||||
|
.leave_directory()
|
||||||
|
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
|
||||||
|
|
||||||
|
// We left a directory, also get back our previous matching state. This is in sync
|
||||||
|
// with `dir_stack` so this should never be empty except for the final goodbye
|
||||||
|
// table, in which case we get back to the default of `true`.
|
||||||
|
current_match = match_stack.pop().unwrap_or(true);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
(true, EntryKind::Symlink(link)) => {
|
||||||
|
callback(entry.path());
|
||||||
|
extractor.extract_symlink(&file_name, metadata, link.as_ref())
|
||||||
|
}
|
||||||
|
(true, EntryKind::Hardlink(link)) => {
|
||||||
|
callback(entry.path());
|
||||||
|
extractor.extract_hardlink(&file_name, link.as_os_str())
|
||||||
|
}
|
||||||
|
(true, EntryKind::Device(dev)) => {
|
||||||
|
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
|
||||||
|
callback(entry.path());
|
||||||
|
extractor.extract_device(&file_name, metadata, dev)
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(true, EntryKind::Fifo) => {
|
||||||
|
if extractor.contains_flags(Flags::WITH_FIFOS) {
|
||||||
|
callback(entry.path());
|
||||||
|
extractor.extract_special(&file_name, metadata, 0)
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(true, EntryKind::Socket) => {
|
||||||
|
if extractor.contains_flags(Flags::WITH_SOCKETS) {
|
||||||
|
callback(entry.path());
|
||||||
|
extractor.extract_special(&file_name, metadata, 0)
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(true, EntryKind::File { size, .. }) => extractor.extract_file(
|
||||||
|
&file_name,
|
||||||
|
metadata,
|
||||||
|
*size,
|
||||||
|
&mut decoder.contents().ok_or_else(|| {
|
||||||
|
format_err!("found regular file entry without contents in archive")
|
||||||
|
})?,
|
||||||
|
),
|
||||||
|
(false, _) => Ok(()), // skip this
|
||||||
|
}
|
||||||
|
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if !extractor.dir_stack.is_empty() {
|
||||||
|
bail!("unexpected eof while decoding pxar archive");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Common state for file extraction.
|
||||||
|
pub(crate) struct Extractor {
|
||||||
|
feature_flags: Flags,
|
||||||
|
allow_existing_dirs: bool,
|
||||||
|
dir_stack: PxarDirStack,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Extractor {
|
||||||
|
/// Create a new extractor state for a target directory.
|
||||||
|
pub fn new(
|
||||||
|
root_dir: Dir,
|
||||||
|
metadata: Metadata,
|
||||||
|
allow_existing_dirs: bool,
|
||||||
|
feature_flags: Flags,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
dir_stack: PxarDirStack::new(root_dir, metadata),
|
||||||
|
allow_existing_dirs,
|
||||||
|
feature_flags,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When encountering a directory during extraction, this is used to keep track of it. If
|
||||||
|
/// `create` is true it is immediately created and its metadata will be updated once we leave
|
||||||
|
/// it. If `create` is false it will only be created if it is going to have any actual content.
|
||||||
|
pub fn enter_directory(
|
||||||
|
&mut self,
|
||||||
|
file_name: OsString,
|
||||||
|
metadata: Metadata,
|
||||||
|
create: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
self.dir_stack.push(file_name, metadata)?;
|
||||||
|
|
||||||
|
if create {
|
||||||
|
self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// When done with a directory we need to make sure we're
|
||||||
|
pub fn leave_directory(&mut self) -> Result<(), Error> {
|
||||||
|
let dir = self
|
||||||
|
.dir_stack
|
||||||
|
.pop()
|
||||||
|
.map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
|
||||||
|
.ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
|
||||||
|
|
||||||
|
if let Some(fd) = dir.try_as_raw_fd() {
|
||||||
|
metadata::apply(
|
||||||
|
self.feature_flags,
|
||||||
|
dir.metadata(),
|
||||||
|
fd,
|
||||||
|
&CString::new(dir.file_name().as_bytes())?,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn contains_flags(&self, flag: Flags) -> bool {
|
||||||
|
self.feature_flags.contains(flag)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parent_fd(&mut self) -> Result<RawFd, Error> {
|
||||||
|
self.dir_stack.last_dir_fd(self.allow_existing_dirs)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extract_symlink(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
metadata: &Metadata,
|
||||||
|
link: &OsStr,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let parent = self.parent_fd()?;
|
||||||
|
nix::unistd::symlinkat(link, Some(parent), file_name)?;
|
||||||
|
metadata::apply_at(self.feature_flags, metadata, parent, file_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extract_hardlink(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
link: &OsStr,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
crate::pxar::tools::assert_relative_path(link)?;
|
||||||
|
|
||||||
|
let parent = self.parent_fd()?;
|
||||||
|
let root = self.dir_stack.root_dir_fd()?;
|
||||||
|
let target = CString::new(link.as_bytes())?;
|
||||||
|
nix::unistd::linkat(
|
||||||
|
Some(root),
|
||||||
|
target.as_c_str(),
|
||||||
|
Some(parent),
|
||||||
|
file_name,
|
||||||
|
nix::unistd::LinkatFlags::NoSymlinkFollow,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extract_device(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
metadata: &Metadata,
|
||||||
|
device: &Device,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
self.extract_special(file_name, metadata, device.to_dev_t())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extract_special(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
metadata: &Metadata,
|
||||||
|
device: libc::dev_t,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mode = metadata.stat.mode;
|
||||||
|
let mode = u32::try_from(mode).map_err(|_| {
|
||||||
|
format_err!(
|
||||||
|
"device node's mode contains illegal bits: 0x{:x} (0o{:o})",
|
||||||
|
mode,
|
||||||
|
mode,
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let parent = self.parent_fd()?;
|
||||||
|
unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
|
||||||
|
.map_err(|err| format_err!("failed to create device node: {}", err))?;
|
||||||
|
|
||||||
|
metadata::apply_at(self.feature_flags, metadata, parent, file_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn extract_file(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
metadata: &Metadata,
|
||||||
|
size: u64,
|
||||||
|
contents: &mut dyn io::Read,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let parent = self.parent_fd()?;
|
||||||
|
let mut file = unsafe {
|
||||||
|
std::fs::File::from_raw_fd(nix::fcntl::openat(
|
||||||
|
parent,
|
||||||
|
file_name,
|
||||||
|
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
|
||||||
|
Mode::from_bits(0o600).unwrap(),
|
||||||
|
)?)
|
||||||
|
};
|
||||||
|
|
||||||
|
let extracted = io::copy(&mut *contents, &mut file)?;
|
||||||
|
if size != extracted {
|
||||||
|
bail!("extracted {} bytes of a file of {} bytes", extracted, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
|
||||||
|
&mut self,
|
||||||
|
file_name: &CStr,
|
||||||
|
metadata: &Metadata,
|
||||||
|
size: u64,
|
||||||
|
contents: &mut T,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let parent = self.parent_fd()?;
|
||||||
|
let mut file = tokio::fs::File::from_std(unsafe {
|
||||||
|
std::fs::File::from_raw_fd(nix::fcntl::openat(
|
||||||
|
parent,
|
||||||
|
file_name,
|
||||||
|
OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
|
||||||
|
Mode::from_bits(0o600).unwrap(),
|
||||||
|
)?)
|
||||||
|
});
|
||||||
|
|
||||||
|
let extracted = tokio::io::copy(&mut *contents, &mut file).await?;
|
||||||
|
if size != extracted {
|
||||||
|
bail!("extracted {} bytes of a file of {} bytes", extracted, size);
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
|
||||||
|
}
|
||||||
|
}
|
@ -3,315 +3,332 @@
|
|||||||
//! Flags for known supported features for a given filesystem can be derived
|
//! Flags for known supported features for a given filesystem can be derived
|
||||||
//! from the superblocks magic number.
|
//! from the superblocks magic number.
|
||||||
|
|
||||||
/// FAT-style 2s time granularity
|
use bitflags::bitflags;
|
||||||
pub const WITH_2SEC_TIME: u64 = 0x40;
|
|
||||||
/// Preserve read only flag of files
|
|
||||||
pub const WITH_READ_ONLY: u64 = 0x80;
|
|
||||||
/// Preserve unix permissions
|
|
||||||
pub const WITH_PERMISSIONS: u64 = 0x100;
|
|
||||||
/// Include symbolik links
|
|
||||||
pub const WITH_SYMLINKS: u64 = 0x200;
|
|
||||||
/// Include device nodes
|
|
||||||
pub const WITH_DEVICE_NODES: u64 = 0x400;
|
|
||||||
/// Include FIFOs
|
|
||||||
pub const WITH_FIFOS: u64 = 0x800;
|
|
||||||
/// Include Sockets
|
|
||||||
pub const WITH_SOCKETS: u64 = 0x1000;
|
|
||||||
|
|
||||||
/// Preserve DOS file flag `HIDDEN`
|
bitflags! {
|
||||||
pub const WITH_FLAG_HIDDEN: u64 = 0x2000;
|
pub struct Flags: u64 {
|
||||||
/// Preserve DOS file flag `SYSTEM`
|
/// FAT-style 2s time granularity
|
||||||
pub const WITH_FLAG_SYSTEM: u64 = 0x4000;
|
const WITH_2SEC_TIME = 0x40;
|
||||||
/// Preserve DOS file flag `ARCHIVE`
|
/// Preserve read only flag of files
|
||||||
pub const WITH_FLAG_ARCHIVE: u64 = 0x8000;
|
const WITH_READ_ONLY = 0x80;
|
||||||
|
/// Preserve unix permissions
|
||||||
|
const WITH_PERMISSIONS = 0x100;
|
||||||
|
/// Include symbolik links
|
||||||
|
const WITH_SYMLINKS = 0x200;
|
||||||
|
/// Include device nodes
|
||||||
|
const WITH_DEVICE_NODES = 0x400;
|
||||||
|
/// Include FIFOs
|
||||||
|
const WITH_FIFOS = 0x800;
|
||||||
|
/// Include Sockets
|
||||||
|
const WITH_SOCKETS = 0x1000;
|
||||||
|
|
||||||
// chattr() flags
|
/// Preserve DOS file flag `HIDDEN`
|
||||||
/// Linux file attribute `APPEND`
|
const WITH_FLAG_HIDDEN = 0x2000;
|
||||||
pub const WITH_FLAG_APPEND: u64 = 0x10000;
|
/// Preserve DOS file flag `SYSTEM`
|
||||||
/// Linux file attribute `NOATIME`
|
const WITH_FLAG_SYSTEM = 0x4000;
|
||||||
pub const WITH_FLAG_NOATIME: u64 = 0x20000;
|
/// Preserve DOS file flag `ARCHIVE`
|
||||||
/// Linux file attribute `COMPR`
|
const WITH_FLAG_ARCHIVE = 0x8000;
|
||||||
pub const WITH_FLAG_COMPR: u64 = 0x40000;
|
|
||||||
/// Linux file attribute `NOCOW`
|
// chattr() flags
|
||||||
pub const WITH_FLAG_NOCOW: u64 = 0x80000;
|
/// Linux file attribute `APPEND`
|
||||||
/// Linux file attribute `NODUMP`
|
const WITH_FLAG_APPEND = 0x10000;
|
||||||
pub const WITH_FLAG_NODUMP: u64 = 0x0010_0000;
|
/// Linux file attribute `NOATIME`
|
||||||
/// Linux file attribute `DIRSYNC`
|
const WITH_FLAG_NOATIME = 0x20000;
|
||||||
pub const WITH_FLAG_DIRSYNC: u64 = 0x0020_0000;
|
/// Linux file attribute `COMPR`
|
||||||
/// Linux file attribute `IMMUTABLE`
|
const WITH_FLAG_COMPR = 0x40000;
|
||||||
pub const WITH_FLAG_IMMUTABLE: u64 = 0x0040_0000;
|
/// Linux file attribute `NOCOW`
|
||||||
/// Linux file attribute `SYNC`
|
const WITH_FLAG_NOCOW = 0x80000;
|
||||||
pub const WITH_FLAG_SYNC: u64 = 0x0080_0000;
|
/// Linux file attribute `NODUMP`
|
||||||
/// Linux file attribute `NOCOMP`
|
const WITH_FLAG_NODUMP = 0x0010_0000;
|
||||||
pub const WITH_FLAG_NOCOMP: u64 = 0x0100_0000;
|
/// Linux file attribute `DIRSYNC`
|
||||||
/// Linux file attribute `PROJINHERIT`
|
const WITH_FLAG_DIRSYNC = 0x0020_0000;
|
||||||
pub const WITH_FLAG_PROJINHERIT: u64 = 0x0200_0000;
|
/// Linux file attribute `IMMUTABLE`
|
||||||
|
const WITH_FLAG_IMMUTABLE = 0x0040_0000;
|
||||||
|
/// Linux file attribute `SYNC`
|
||||||
|
const WITH_FLAG_SYNC = 0x0080_0000;
|
||||||
|
/// Linux file attribute `NOCOMP`
|
||||||
|
const WITH_FLAG_NOCOMP = 0x0100_0000;
|
||||||
|
/// Linux file attribute `PROJINHERIT`
|
||||||
|
const WITH_FLAG_PROJINHERIT = 0x0200_0000;
|
||||||
|
|
||||||
|
|
||||||
/// Preserve BTRFS subvolume flag
|
/// Preserve BTRFS subvolume flag
|
||||||
pub const WITH_SUBVOLUME: u64 = 0x0400_0000;
|
const WITH_SUBVOLUME = 0x0400_0000;
|
||||||
/// Preserve BTRFS read-only subvolume flag
|
/// Preserve BTRFS read-only subvolume flag
|
||||||
pub const WITH_SUBVOLUME_RO: u64 = 0x0800_0000;
|
const WITH_SUBVOLUME_RO = 0x0800_0000;
|
||||||
|
|
||||||
/// Preserve Extended Attribute metadata
|
/// Preserve Extended Attribute metadata
|
||||||
pub const WITH_XATTRS: u64 = 0x1000_0000;
|
const WITH_XATTRS = 0x1000_0000;
|
||||||
/// Preserve Access Control List metadata
|
/// Preserve Access Control List metadata
|
||||||
pub const WITH_ACL: u64 = 0x2000_0000;
|
const WITH_ACL = 0x2000_0000;
|
||||||
/// Preserve SELinux security context
|
/// Preserve SELinux security context
|
||||||
pub const WITH_SELINUX: u64 = 0x4000_0000;
|
const WITH_SELINUX = 0x4000_0000;
|
||||||
/// Preserve "security.capability" xattr
|
/// Preserve "security.capability" xattr
|
||||||
pub const WITH_FCAPS: u64 = 0x8000_0000;
|
const WITH_FCAPS = 0x8000_0000;
|
||||||
|
|
||||||
/// Preserve XFS/ext4/ZFS project quota ID
|
/// Preserve XFS/ext4/ZFS project quota ID
|
||||||
pub const WITH_QUOTA_PROJID: u64 = 0x0001_0000_0000;
|
const WITH_QUOTA_PROJID = 0x0001_0000_0000;
|
||||||
|
|
||||||
/// Support ".pxarexclude" files
|
/// Support ".pxarexclude" files
|
||||||
pub const EXCLUDE_FILE: u64 = 0x1000_0000_0000_0000;
|
const EXCLUDE_FILE = 0x1000_0000_0000_0000;
|
||||||
/// Exclude submounts
|
/// Exclude submounts
|
||||||
pub const EXCLUDE_SUBMOUNTS: u64 = 0x4000_0000_0000_0000;
|
const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
|
||||||
/// Exclude entries with chattr flag NODUMP
|
/// Exclude entries with chattr flag NODUMP
|
||||||
pub const EXCLUDE_NODUMP: u64 = 0x8000_0000_0000_0000;
|
const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;
|
||||||
|
|
||||||
/// Definitions of typical feature flags for the *pxar* encoder/decoder.
|
// Definitions of typical feature flags for the *pxar* encoder/decoder.
|
||||||
/// By this expensive syscalls for unsupported features are avoided.
|
// By this expensive syscalls for unsupported features are avoided.
|
||||||
|
|
||||||
/// All chattr file attributes
|
/// All chattr file attributes
|
||||||
pub const WITH_CHATTR: u64 =
|
const WITH_CHATTR =
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_FLAG_APPEND.bits() |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_FLAG_NOATIME.bits() |
|
||||||
WITH_FLAG_COMPR|
|
Flags::WITH_FLAG_COMPR.bits() |
|
||||||
WITH_FLAG_NOCOW|
|
Flags::WITH_FLAG_NOCOW.bits() |
|
||||||
WITH_FLAG_NODUMP|
|
Flags::WITH_FLAG_NODUMP.bits() |
|
||||||
WITH_FLAG_DIRSYNC|
|
Flags::WITH_FLAG_DIRSYNC.bits() |
|
||||||
WITH_FLAG_IMMUTABLE|
|
Flags::WITH_FLAG_IMMUTABLE.bits() |
|
||||||
WITH_FLAG_SYNC|
|
Flags::WITH_FLAG_SYNC.bits() |
|
||||||
WITH_FLAG_NOCOMP|
|
Flags::WITH_FLAG_NOCOMP.bits() |
|
||||||
WITH_FLAG_PROJINHERIT;
|
Flags::WITH_FLAG_PROJINHERIT.bits();
|
||||||
|
|
||||||
/// All FAT file attributes
|
/// All FAT file attributes
|
||||||
pub const WITH_FAT_ATTRS: u64 =
|
const WITH_FAT_ATTRS =
|
||||||
WITH_FLAG_HIDDEN|
|
Flags::WITH_FLAG_HIDDEN.bits() |
|
||||||
WITH_FLAG_SYSTEM|
|
Flags::WITH_FLAG_SYSTEM.bits() |
|
||||||
WITH_FLAG_ARCHIVE;
|
Flags::WITH_FLAG_ARCHIVE.bits();
|
||||||
|
|
||||||
/// All bits that may also be exposed via fuse
|
/// All bits that may also be exposed via fuse
|
||||||
pub const WITH_FUSE: u64 =
|
const WITH_FUSE =
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_2SEC_TIME.bits() |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_READ_ONLY.bits() |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_PERMISSIONS.bits() |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_SYMLINKS.bits() |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_DEVICE_NODES.bits() |
|
||||||
WITH_FIFOS|
|
Flags::WITH_FIFOS.bits() |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_SOCKETS.bits() |
|
||||||
WITH_FAT_ATTRS|
|
Flags::WITH_FAT_ATTRS.bits() |
|
||||||
WITH_CHATTR|
|
Flags::WITH_CHATTR.bits() |
|
||||||
WITH_XATTRS;
|
Flags::WITH_XATTRS.bits();
|
||||||
|
|
||||||
|
|
||||||
/// Default feature flags for encoder/decoder
|
/// Default feature flags for encoder/decoder
|
||||||
pub const DEFAULT: u64 =
|
const DEFAULT =
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_SYMLINKS.bits() |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_DEVICE_NODES.bits() |
|
||||||
WITH_FIFOS|
|
Flags::WITH_FIFOS.bits() |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_SOCKETS.bits() |
|
||||||
WITH_FLAG_HIDDEN|
|
Flags::WITH_FLAG_HIDDEN.bits() |
|
||||||
WITH_FLAG_SYSTEM|
|
Flags::WITH_FLAG_SYSTEM.bits() |
|
||||||
WITH_FLAG_ARCHIVE|
|
Flags::WITH_FLAG_ARCHIVE.bits() |
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_FLAG_APPEND.bits() |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_FLAG_NOATIME.bits() |
|
||||||
WITH_FLAG_COMPR|
|
Flags::WITH_FLAG_COMPR.bits() |
|
||||||
WITH_FLAG_NOCOW|
|
Flags::WITH_FLAG_NOCOW.bits() |
|
||||||
//WITH_FLAG_NODUMP|
|
//WITH_FLAG_NODUMP.bits() |
|
||||||
WITH_FLAG_DIRSYNC|
|
Flags::WITH_FLAG_DIRSYNC.bits() |
|
||||||
WITH_FLAG_IMMUTABLE|
|
Flags::WITH_FLAG_IMMUTABLE.bits() |
|
||||||
WITH_FLAG_SYNC|
|
Flags::WITH_FLAG_SYNC.bits() |
|
||||||
WITH_FLAG_NOCOMP|
|
Flags::WITH_FLAG_NOCOMP.bits() |
|
||||||
WITH_FLAG_PROJINHERIT|
|
Flags::WITH_FLAG_PROJINHERIT.bits() |
|
||||||
WITH_SUBVOLUME|
|
Flags::WITH_SUBVOLUME.bits() |
|
||||||
WITH_SUBVOLUME_RO|
|
Flags::WITH_SUBVOLUME_RO.bits() |
|
||||||
WITH_XATTRS|
|
Flags::WITH_XATTRS.bits() |
|
||||||
WITH_ACL|
|
Flags::WITH_ACL.bits() |
|
||||||
WITH_SELINUX|
|
Flags::WITH_SELINUX.bits() |
|
||||||
WITH_FCAPS|
|
Flags::WITH_FCAPS.bits() |
|
||||||
WITH_QUOTA_PROJID|
|
Flags::WITH_QUOTA_PROJID.bits() |
|
||||||
EXCLUDE_NODUMP|
|
Flags::EXCLUDE_NODUMP.bits() |
|
||||||
EXCLUDE_FILE;
|
Flags::EXCLUDE_FILE.bits();
|
||||||
|
}
|
||||||
// form /usr/include/linux/fs.h
|
}
|
||||||
const FS_APPEND_FL: u32 = 0x0000_0020;
|
|
||||||
const FS_NOATIME_FL: u32 = 0x0000_0080;
|
impl Default for Flags {
|
||||||
const FS_COMPR_FL: u32 = 0x0000_0004;
|
fn default() -> Flags {
|
||||||
const FS_NOCOW_FL: u32 = 0x0080_0000;
|
Flags::DEFAULT
|
||||||
const FS_NODUMP_FL: u32 = 0x0000_0040;
|
}
|
||||||
const FS_DIRSYNC_FL: u32 = 0x0001_0000;
|
}
|
||||||
const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
|
|
||||||
const FS_SYNC_FL: u32 = 0x0000_0008;
|
impl Flags {
|
||||||
const FS_NOCOMP_FL: u32 = 0x0000_0400;
|
/// Get a set of feature flags from file attributes.
|
||||||
const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
|
pub fn from_chattr(attr: u32) -> Flags {
|
||||||
|
// form /usr/include/linux/fs.h
|
||||||
static CHATTR_MAP: [(u64, u32); 10] = [
|
const FS_APPEND_FL: u32 = 0x0000_0020;
|
||||||
( WITH_FLAG_APPEND, FS_APPEND_FL ),
|
const FS_NOATIME_FL: u32 = 0x0000_0080;
|
||||||
( WITH_FLAG_NOATIME, FS_NOATIME_FL ),
|
const FS_COMPR_FL: u32 = 0x0000_0004;
|
||||||
( WITH_FLAG_COMPR, FS_COMPR_FL ),
|
const FS_NOCOW_FL: u32 = 0x0080_0000;
|
||||||
( WITH_FLAG_NOCOW, FS_NOCOW_FL ),
|
const FS_NODUMP_FL: u32 = 0x0000_0040;
|
||||||
( WITH_FLAG_NODUMP, FS_NODUMP_FL ),
|
const FS_DIRSYNC_FL: u32 = 0x0001_0000;
|
||||||
( WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
|
const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
|
||||||
( WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
|
const FS_SYNC_FL: u32 = 0x0000_0008;
|
||||||
( WITH_FLAG_SYNC, FS_SYNC_FL ),
|
const FS_NOCOMP_FL: u32 = 0x0000_0400;
|
||||||
( WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
|
const FS_PROJINHERIT_FL: u32 = 0x2000_0000;
|
||||||
( WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
|
|
||||||
];
|
const CHATTR_MAP: [(Flags, u32); 10] = [
|
||||||
|
( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
|
||||||
pub fn feature_flags_from_chattr(attr: u32) -> u64 {
|
( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
|
||||||
|
( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
|
||||||
let mut flags = 0u64;
|
( Flags::WITH_FLAG_NOCOW, FS_NOCOW_FL ),
|
||||||
|
( Flags::WITH_FLAG_NODUMP, FS_NODUMP_FL ),
|
||||||
for (fe_flag, fs_flag) in &CHATTR_MAP {
|
( Flags::WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
|
||||||
if (attr & fs_flag) != 0 { flags |= fe_flag; }
|
( Flags::WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
|
||||||
}
|
( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
|
||||||
|
( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
|
||||||
flags
|
( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
|
||||||
}
|
];
|
||||||
|
|
||||||
// from /usr/include/linux/msdos_fs.h
|
let mut flags = Flags::empty();
|
||||||
const ATTR_HIDDEN: u32 = 2;
|
|
||||||
const ATTR_SYS: u32 = 4;
|
for (fe_flag, fs_flag) in &CHATTR_MAP {
|
||||||
const ATTR_ARCH: u32 = 32;
|
if (attr & fs_flag) != 0 {
|
||||||
|
flags |= *fe_flag;
|
||||||
static FAT_ATTR_MAP: [(u64, u32); 3] = [
|
}
|
||||||
( WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
|
}
|
||||||
( WITH_FLAG_SYSTEM, ATTR_SYS ),
|
|
||||||
( WITH_FLAG_ARCHIVE, ATTR_ARCH ),
|
flags
|
||||||
];
|
}
|
||||||
|
|
||||||
pub fn feature_flags_from_fat_attr(attr: u32) -> u64 {
|
/// Get a set of feature flags from FAT attributes.
|
||||||
|
pub fn from_fat_attr(attr: u32) -> Flags {
|
||||||
let mut flags = 0u64;
|
// from /usr/include/linux/msdos_fs.h
|
||||||
|
const ATTR_HIDDEN: u32 = 2;
|
||||||
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
|
const ATTR_SYS: u32 = 4;
|
||||||
if (attr & fs_flag) != 0 { flags |= fe_flag; }
|
const ATTR_ARCH: u32 = 32;
|
||||||
}
|
|
||||||
|
const FAT_ATTR_MAP: [(Flags, u32); 3] = [
|
||||||
flags
|
( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
|
||||||
}
|
( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
|
||||||
|
( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
|
||||||
|
];
|
||||||
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
|
|
||||||
pub fn feature_flags_from_magic(magic: i64) -> u64 {
|
let mut flags = Flags::empty();
|
||||||
use proxmox::sys::linux::magic::*;
|
|
||||||
match magic {
|
for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
|
||||||
MSDOS_SUPER_MAGIC => {
|
if (attr & fs_flag) != 0 {
|
||||||
WITH_2SEC_TIME|
|
flags |= *fe_flag;
|
||||||
WITH_READ_ONLY|
|
}
|
||||||
WITH_FAT_ATTRS
|
}
|
||||||
},
|
|
||||||
EXT4_SUPER_MAGIC => {
|
flags
|
||||||
WITH_2SEC_TIME|
|
}
|
||||||
WITH_READ_ONLY|
|
|
||||||
WITH_PERMISSIONS|
|
/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
|
||||||
WITH_SYMLINKS|
|
pub fn from_magic(magic: i64) -> Flags {
|
||||||
WITH_DEVICE_NODES|
|
use proxmox::sys::linux::magic::*;
|
||||||
WITH_FIFOS|
|
match magic {
|
||||||
WITH_SOCKETS|
|
MSDOS_SUPER_MAGIC => {
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_READ_ONLY |
|
||||||
WITH_FLAG_NODUMP|
|
Flags::WITH_FAT_ATTRS
|
||||||
WITH_FLAG_DIRSYNC|
|
},
|
||||||
WITH_FLAG_IMMUTABLE|
|
EXT4_SUPER_MAGIC => {
|
||||||
WITH_FLAG_SYNC|
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_XATTRS|
|
Flags::WITH_READ_ONLY |
|
||||||
WITH_ACL|
|
Flags::WITH_PERMISSIONS |
|
||||||
WITH_SELINUX|
|
Flags::WITH_SYMLINKS |
|
||||||
WITH_FCAPS|
|
Flags::WITH_DEVICE_NODES |
|
||||||
WITH_QUOTA_PROJID
|
Flags::WITH_FIFOS |
|
||||||
},
|
Flags::WITH_SOCKETS |
|
||||||
XFS_SUPER_MAGIC => {
|
Flags::WITH_FLAG_APPEND |
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_FLAG_NOATIME |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_FLAG_NODUMP |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_FLAG_DIRSYNC |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_FLAG_IMMUTABLE |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_FLAG_SYNC |
|
||||||
WITH_FIFOS|
|
Flags::WITH_XATTRS |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_ACL |
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_SELINUX |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_FCAPS |
|
||||||
WITH_FLAG_NODUMP|
|
Flags::WITH_QUOTA_PROJID
|
||||||
WITH_FLAG_IMMUTABLE|
|
},
|
||||||
WITH_FLAG_SYNC|
|
XFS_SUPER_MAGIC => {
|
||||||
WITH_XATTRS|
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_ACL|
|
Flags::WITH_READ_ONLY |
|
||||||
WITH_SELINUX|
|
Flags::WITH_PERMISSIONS |
|
||||||
WITH_FCAPS|
|
Flags::WITH_SYMLINKS |
|
||||||
WITH_QUOTA_PROJID
|
Flags::WITH_DEVICE_NODES |
|
||||||
},
|
Flags::WITH_FIFOS |
|
||||||
ZFS_SUPER_MAGIC => {
|
Flags::WITH_SOCKETS |
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_FLAG_APPEND |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_FLAG_NOATIME |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_FLAG_NODUMP |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_FLAG_IMMUTABLE |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_FLAG_SYNC |
|
||||||
WITH_FIFOS|
|
Flags::WITH_XATTRS |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_ACL |
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_SELINUX |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_FCAPS |
|
||||||
WITH_FLAG_NODUMP|
|
Flags::WITH_QUOTA_PROJID
|
||||||
WITH_FLAG_DIRSYNC|
|
},
|
||||||
WITH_FLAG_IMMUTABLE|
|
ZFS_SUPER_MAGIC => {
|
||||||
WITH_FLAG_SYNC|
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_XATTRS|
|
Flags::WITH_READ_ONLY |
|
||||||
WITH_ACL|
|
Flags::WITH_PERMISSIONS |
|
||||||
WITH_SELINUX|
|
Flags::WITH_SYMLINKS |
|
||||||
WITH_FCAPS|
|
Flags::WITH_DEVICE_NODES |
|
||||||
WITH_QUOTA_PROJID
|
Flags::WITH_FIFOS |
|
||||||
},
|
Flags::WITH_SOCKETS |
|
||||||
BTRFS_SUPER_MAGIC => {
|
Flags::WITH_FLAG_APPEND |
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_FLAG_NOATIME |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_FLAG_NODUMP |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_FLAG_DIRSYNC |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_FLAG_IMMUTABLE |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_FLAG_SYNC |
|
||||||
WITH_FIFOS|
|
Flags::WITH_XATTRS |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_ACL |
|
||||||
WITH_FLAG_APPEND|
|
Flags::WITH_SELINUX |
|
||||||
WITH_FLAG_NOATIME|
|
Flags::WITH_FCAPS |
|
||||||
WITH_FLAG_COMPR|
|
Flags::WITH_QUOTA_PROJID
|
||||||
WITH_FLAG_NOCOW|
|
},
|
||||||
WITH_FLAG_NODUMP|
|
BTRFS_SUPER_MAGIC => {
|
||||||
WITH_FLAG_DIRSYNC|
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_FLAG_IMMUTABLE|
|
Flags::WITH_READ_ONLY |
|
||||||
WITH_FLAG_SYNC|
|
Flags::WITH_PERMISSIONS |
|
||||||
WITH_FLAG_NOCOMP|
|
Flags::WITH_SYMLINKS |
|
||||||
WITH_XATTRS|
|
Flags::WITH_DEVICE_NODES |
|
||||||
WITH_ACL|
|
Flags::WITH_FIFOS |
|
||||||
WITH_SELINUX|
|
Flags::WITH_SOCKETS |
|
||||||
WITH_SUBVOLUME|
|
Flags::WITH_FLAG_APPEND |
|
||||||
WITH_SUBVOLUME_RO|
|
Flags::WITH_FLAG_NOATIME |
|
||||||
WITH_FCAPS
|
Flags::WITH_FLAG_COMPR |
|
||||||
},
|
Flags::WITH_FLAG_NOCOW |
|
||||||
TMPFS_MAGIC => {
|
Flags::WITH_FLAG_NODUMP |
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_FLAG_DIRSYNC |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_FLAG_IMMUTABLE |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_FLAG_SYNC |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_FLAG_NOCOMP |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_XATTRS |
|
||||||
WITH_FIFOS|
|
Flags::WITH_ACL |
|
||||||
WITH_SOCKETS|
|
Flags::WITH_SELINUX |
|
||||||
WITH_ACL|
|
Flags::WITH_SUBVOLUME |
|
||||||
WITH_SELINUX
|
Flags::WITH_SUBVOLUME_RO |
|
||||||
},
|
Flags::WITH_FCAPS
|
||||||
// FUSE mounts are special as the supported feature set
|
},
|
||||||
// is not clear a priori.
|
TMPFS_MAGIC => {
|
||||||
FUSE_SUPER_MAGIC => {
|
Flags::WITH_2SEC_TIME |
|
||||||
WITH_FUSE
|
Flags::WITH_READ_ONLY |
|
||||||
},
|
Flags::WITH_PERMISSIONS |
|
||||||
_ => {
|
Flags::WITH_SYMLINKS |
|
||||||
WITH_2SEC_TIME|
|
Flags::WITH_DEVICE_NODES |
|
||||||
WITH_READ_ONLY|
|
Flags::WITH_FIFOS |
|
||||||
WITH_PERMISSIONS|
|
Flags::WITH_SOCKETS |
|
||||||
WITH_SYMLINKS|
|
Flags::WITH_ACL |
|
||||||
WITH_DEVICE_NODES|
|
Flags::WITH_SELINUX
|
||||||
WITH_FIFOS|
|
},
|
||||||
WITH_SOCKETS
|
// FUSE mounts are special as the supported feature set
|
||||||
},
|
// is not clear a priori.
|
||||||
|
FUSE_SUPER_MAGIC => {
|
||||||
|
Flags::WITH_FUSE
|
||||||
|
},
|
||||||
|
_ => {
|
||||||
|
Flags::WITH_2SEC_TIME |
|
||||||
|
Flags::WITH_READ_ONLY |
|
||||||
|
Flags::WITH_PERMISSIONS |
|
||||||
|
Flags::WITH_SYMLINKS |
|
||||||
|
Flags::WITH_DEVICE_NODES |
|
||||||
|
Flags::WITH_FIFOS |
|
||||||
|
Flags::WITH_SOCKETS
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,263 +0,0 @@
|
|||||||
//! *pxar* binary format definition
|
|
||||||
//!
|
|
||||||
//! Please note the all values are stored in little endian ordering.
|
|
||||||
//!
|
|
||||||
//! The Archive contains a list of items. Each item starts with a
|
|
||||||
//! `PxarHeader`, followed by the item data.
|
|
||||||
use std::cmp::Ordering;
|
|
||||||
|
|
||||||
use endian_trait::Endian;
|
|
||||||
use anyhow::{bail, Error};
|
|
||||||
use siphasher::sip::SipHasher24;
|
|
||||||
|
|
||||||
|
|
||||||
/// Header types identifying items stored in the archive
|
|
||||||
pub const PXAR_ENTRY: u64 = 0x1396fabcea5bbb51;
|
|
||||||
pub const PXAR_FILENAME: u64 = 0x6dbb6ebcb3161f0b;
|
|
||||||
pub const PXAR_SYMLINK: u64 = 0x664a6fb6830e0d6c;
|
|
||||||
pub const PXAR_DEVICE: u64 = 0xac3dace369dfe643;
|
|
||||||
pub const PXAR_XATTR: u64 = 0xb8157091f80bc486;
|
|
||||||
pub const PXAR_ACL_USER: u64 = 0x297dc88b2ef12faf;
|
|
||||||
pub const PXAR_ACL_GROUP: u64 = 0x36f2acb56cb3dd0b;
|
|
||||||
pub const PXAR_ACL_GROUP_OBJ: u64 = 0x23047110441f38f3;
|
|
||||||
pub const PXAR_ACL_DEFAULT: u64 = 0xfe3eeda6823c8cd0;
|
|
||||||
pub const PXAR_ACL_DEFAULT_USER: u64 = 0xbdf03df9bd010a91;
|
|
||||||
pub const PXAR_ACL_DEFAULT_GROUP: u64 = 0xa0cb1168782d1f51;
|
|
||||||
pub const PXAR_FCAPS: u64 = 0xf7267db0afed0629;
|
|
||||||
pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;
|
|
||||||
|
|
||||||
/// Marks item as hardlink
|
|
||||||
/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
|
|
||||||
pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
|
|
||||||
/// Marks the beginning of the payload (actual content) of regular files
|
|
||||||
pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
|
|
||||||
/// Marks item as entry of goodbye table
|
|
||||||
pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
|
|
||||||
/// The end marker used in the GOODBYE object
|
|
||||||
pub const PXAR_GOODBYE_TAIL_MARKER: u64 = 0x57446fa533702943;
|
|
||||||
|
|
||||||
#[derive(Debug, Endian)]
|
|
||||||
#[repr(C)]
|
|
||||||
pub struct PxarHeader {
|
|
||||||
/// The item type (see `PXAR_` constants).
|
|
||||||
pub htype: u64,
|
|
||||||
/// The size of the item, including the size of `PxarHeader`.
|
|
||||||
pub size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Endian)]
|
|
||||||
#[repr(C)]
|
|
||||||
pub struct PxarEntry {
|
|
||||||
pub mode: u64,
|
|
||||||
pub flags: u64,
|
|
||||||
pub uid: u32,
|
|
||||||
pub gid: u32,
|
|
||||||
pub mtime: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Endian)]
|
|
||||||
#[repr(C)]
|
|
||||||
pub struct PxarDevice {
|
|
||||||
pub major: u64,
|
|
||||||
pub minor: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Endian)]
|
|
||||||
#[repr(C)]
|
|
||||||
pub struct PxarGoodbyeItem {
|
|
||||||
    /// SipHash24 of the directory item name. The last GOODBYE item
    /// uses the special hash value `PXAR_GOODBYE_TAIL_MARKER`.
    pub hash: u64,
    /// The offset from the start of the GOODBYE object to the start
    /// of the matching directory item (points to a FILENAME). The last
    /// GOODBYE item points to the start of the matching ENTRY
    /// object.
    pub offset: u64,
    /// The overall size of the directory item. The last GOODBYE item
    /// repeats the size of the GOODBYE item.
    pub size: u64,
}

/// Helper function to extract file names from binary archive.
pub fn read_os_string(buffer: &[u8]) -> std::ffi::OsString {
    let len = buffer.len();

    use std::os::unix::ffi::OsStrExt;

    let name = if len > 0 && buffer[len - 1] == 0 {
        std::ffi::OsStr::from_bytes(&buffer[0..len - 1])
    } else {
        std::ffi::OsStr::from_bytes(&buffer)
    };

    name.into()
}

#[derive(Debug, Eq)]
#[repr(C)]
pub struct PxarXAttr {
    pub name: Vec<u8>,
    pub value: Vec<u8>,
}

impl Ord for PxarXAttr {
    fn cmp(&self, other: &PxarXAttr) -> Ordering {
        self.name.cmp(&other.name)
    }
}

impl PartialOrd for PxarXAttr {
    fn partial_cmp(&self, other: &PxarXAttr) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarXAttr {
    fn eq(&self, other: &PxarXAttr) -> bool {
        self.name == other.name
    }
}

#[derive(Debug)]
#[repr(C)]
pub struct PxarFCaps {
    pub data: Vec<u8>,
}

#[derive(Debug, Endian, Eq)]
#[repr(C)]
pub struct PxarACLUser {
    pub uid: u64,
    pub permissions: u64,
    //pub name: Vec<u64>, not impl for now
}

// TODO if also name is impl, sort by uid, then by name and last by permissions
impl Ord for PxarACLUser {
    fn cmp(&self, other: &PxarACLUser) -> Ordering {
        match self.uid.cmp(&other.uid) {
            // uids are equal, entries ordered by permissions
            Ordering::Equal => self.permissions.cmp(&other.permissions),
            // uids are different, entries ordered by uid
            uid_order => uid_order,
        }
    }
}

impl PartialOrd for PxarACLUser {
    fn partial_cmp(&self, other: &PxarACLUser) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarACLUser {
    fn eq(&self, other: &PxarACLUser) -> bool {
        self.uid == other.uid && self.permissions == other.permissions
    }
}

#[derive(Debug, Endian, Eq)]
#[repr(C)]
pub struct PxarACLGroup {
    pub gid: u64,
    pub permissions: u64,
    //pub name: Vec<u64>, not impl for now
}

// TODO if also name is impl, sort by gid, then by name and last by permissions
impl Ord for PxarACLGroup {
    fn cmp(&self, other: &PxarACLGroup) -> Ordering {
        match self.gid.cmp(&other.gid) {
            // gids are equal, entries are ordered by permissions
            Ordering::Equal => self.permissions.cmp(&other.permissions),
            // gids are different, entries ordered by gid
            gid_ordering => gid_ordering,
        }
    }
}

impl PartialOrd for PxarACLGroup {
    fn partial_cmp(&self, other: &PxarACLGroup) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarACLGroup {
    fn eq(&self, other: &PxarACLGroup) -> bool {
        self.gid == other.gid && self.permissions == other.permissions
    }
}

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarACLGroupObj {
    pub permissions: u64,
}

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarACLDefault {
    pub user_obj_permissions: u64,
    pub group_obj_permissions: u64,
    pub other_permissions: u64,
    pub mask_permissions: u64,
}

pub(crate) struct PxarACL {
    pub users: Vec<PxarACLUser>,
    pub groups: Vec<PxarACLGroup>,
    pub group_obj: Option<PxarACLGroupObj>,
    pub default: Option<PxarACLDefault>,
}

pub const PXAR_ACL_PERMISSION_READ: u64 = 4;
pub const PXAR_ACL_PERMISSION_WRITE: u64 = 2;
pub const PXAR_ACL_PERMISSION_EXECUTE: u64 = 1;

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarQuotaProjID {
    pub projid: u64,
}

#[derive(Debug, Default)]
pub struct PxarAttributes {
    pub xattrs: Vec<PxarXAttr>,
    pub fcaps: Option<PxarFCaps>,
    pub quota_projid: Option<PxarQuotaProjID>,
    pub acl_user: Vec<PxarACLUser>,
    pub acl_group: Vec<PxarACLGroup>,
    pub acl_group_obj: Option<PxarACLGroupObj>,
    pub acl_default: Option<PxarACLDefault>,
    pub acl_default_user: Vec<PxarACLUser>,
    pub acl_default_group: Vec<PxarACLGroup>,
}

/// Create SipHash values for goodbye tables.
//pub fn compute_goodbye_hash(name: &std::ffi::CStr) -> u64 {
pub fn compute_goodbye_hash(name: &[u8]) -> u64 {
    use std::hash::Hasher;
    let mut hasher = SipHasher24::new_with_keys(0x8574442b0f1d84b3, 0x2736ed30d1c22ec1);
    hasher.write(name);
    hasher.finish()
}

pub fn check_ca_header<T>(head: &PxarHeader, htype: u64) -> Result<(), Error> {
    if head.htype != htype {
        bail!(
            "got wrong header type ({:016x} != {:016x})",
            head.htype,
            htype
        );
    }
    if head.size != (std::mem::size_of::<T>() + std::mem::size_of::<PxarHeader>()) as u64 {
        bail!("got wrong header size for type {:016x}", htype);
    }

    Ok(())
}

/// The format requires building sorted directory lookup tables in
/// memory, so we restrict the number of allowed entries to limit
/// maximum memory usage.
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
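Illustrative sketch, not part of the changeset: how the helpers above fit together when a goodbye-table slot is filled. It assumes placement in the same module as `compute_goodbye_hash()` and `read_os_string()`; the file name is made up.

fn goodbye_hash_example() {
    // The hash stored in a GOODBYE item is the SipHash24 of the raw name bytes,
    // keyed with the fixed keys hard-coded in compute_goodbye_hash().
    let name: &[u8] = b"some.conf"; // hypothetical directory entry name
    let _hash: u64 = compute_goodbye_hash(name);

    // read_os_string() strips an optional trailing NUL, so both buffers below
    // decode to the same OsString.
    assert_eq!(read_os_string(b"some.conf"), read_os_string(b"some.conf\0"));
}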
1428	src/pxar/fuse.rs	(file diff suppressed because it is too large)

@@ -1,36 +0,0 @@
use libc;
use nix::sys::stat::FileStat;

#[inline(always)]
pub fn is_directory(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR
}

#[inline(always)]
pub fn is_symlink(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFLNK
}

#[inline(always)]
pub fn is_reg_file(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFREG
}

#[inline(always)]
pub fn is_block_dev(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFBLK
}

#[inline(always)]
pub fn is_char_dev(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFCHR
}

#[inline(always)]
pub fn is_fifo(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFIFO
}

#[inline(always)]
pub fn is_socket(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFSOCK
}
@@ -1,514 +0,0 @@
//! `MatchPattern` defines a match pattern used to match filenames encountered
//! during encoding or decoding of a `pxar` archive.
//! `fnmatch` is used internally to match filenames against the patterns.
//! Shell wildcard patterns can be used to match multiple filenames, see manpage
//! `glob(7)`.
//! `**` is treated special, as it matches multiple directories in a path.

use std::ffi::{CStr, CString};
use std::fs::File;
use std::io::Read;
use std::os::unix::io::{FromRawFd, RawFd};

use anyhow::{bail, Error};
use libc::{c_char, c_int};
use nix::errno::Errno;
use nix::fcntl;
use nix::fcntl::{AtFlags, OFlag};
use nix::sys::stat;
use nix::sys::stat::{FileStat, Mode};
use nix::NixPath;

pub const FNM_NOMATCH: c_int = 1;

extern "C" {
    fn fnmatch(pattern: *const c_char, string: *const c_char, flags: c_int) -> c_int;
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum MatchType {
    None,
    Positive,
    Negative,
    PartialPositive,
    PartialNegative,
}

/// `MatchPattern` provides functionality for filename glob pattern matching
/// based on glibc's `fnmatch`.
/// Positive matches return `MatchType::PartialPositive` or `MatchType::Positive`.
/// Patterns starting with `!` are interpreted as negation, meaning they will
/// return `MatchType::PartialNegative` or `MatchType::Negative`.
/// No matches result in `MatchType::None`.
/// # Examples:
/// ```
/// # use std::ffi::CString;
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchType};
/// # fn main() -> Result<(), anyhow::Error> {
/// let filename = CString::new("some.conf")?;
/// let is_dir = false;
///
/// /// Positive match of any file ending in `.conf` in any subdirectory
/// let positive = MatchPattern::from_line(b"**/*.conf")?.unwrap();
/// let m_positive = positive.as_slice().matches_filename(&filename, is_dir)?;
/// assert!(m_positive == MatchType::Positive);
///
/// /// Negative match of filenames starting with `s`
/// let negative = MatchPattern::from_line(b"![s]*")?.unwrap();
/// let m_negative = negative.as_slice().matches_filename(&filename, is_dir)?;
/// assert!(m_negative == MatchType::Negative);
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Eq, PartialOrd)]
pub struct MatchPattern {
    pattern: Vec<u8>,
    match_positive: bool,
    match_dir_only: bool,
}

impl std::cmp::PartialEq for MatchPattern {
    fn eq(&self, other: &Self) -> bool {
        self.pattern == other.pattern
            && self.match_positive == other.match_positive
            && self.match_dir_only == other.match_dir_only
    }
}

impl std::cmp::Ord for MatchPattern {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        (&self.pattern, &self.match_positive, &self.match_dir_only)
            .cmp(&(&other.pattern, &other.match_positive, &other.match_dir_only))
    }
}

impl MatchPattern {
    /// Read a list of `MatchPattern` from file.
    /// The file is read line by line (lines terminated by newline character),
    /// each line may only contain one pattern.
    /// Leading `/` are ignored and lines starting with `#` are interpreted as
    /// comments and not included in the resulting list.
    /// Patterns ending in `/` will match only directories.
    ///
    /// On success, a list of match patterns is returned as well as the raw file
    /// byte buffer together with the file's stats.
    /// This is done in order to avoid reading the file more than once during
    /// encoding of the archive.
    pub fn from_file<P: ?Sized + NixPath>(
        parent_fd: RawFd,
        filename: &P,
    ) -> Result<Option<(Vec<MatchPattern>, Vec<u8>, FileStat)>, nix::Error> {
        let stat = match stat::fstatat(parent_fd, filename, AtFlags::AT_SYMLINK_NOFOLLOW) {
            Ok(stat) => stat,
            Err(nix::Error::Sys(Errno::ENOENT)) => return Ok(None),
            Err(err) => return Err(err),
        };

        let filefd = fcntl::openat(parent_fd, filename, OFlag::O_NOFOLLOW, Mode::empty())?;
        let mut file = unsafe { File::from_raw_fd(filefd) };

        let mut content_buffer = Vec::new();
        let _bytes = file.read_to_end(&mut content_buffer)
            .map_err(|_| Errno::EIO)?;

        let mut match_pattern = Vec::new();
        for line in content_buffer.split(|&c| c == b'\n') {
            if line.is_empty() {
                continue;
            }
            if let Some(pattern) = Self::from_line(line)? {
                match_pattern.push(pattern);
            }
        }

        Ok(Some((match_pattern, content_buffer, stat)))
    }

    /// Interpret a byte buffer as a single line containing a valid
    /// `MatchPattern`.
    /// Patterns starting with `#` are interpreted as comments, returning `Ok(None)`.
    /// Patterns starting with `!` are interpreted as negative match patterns.
    /// Patterns with trailing `/` match only against directories.
    /// `.` as well as `..` and any pattern containing `\0` are invalid and will
    /// result in an error with `Errno::EINVAL`.
    pub fn from_line(line: &[u8]) -> Result<Option<MatchPattern>, nix::Error> {
        let mut input = line;

        if input.starts_with(b"#") {
            return Ok(None);
        }

        let match_positive = if input.starts_with(b"!") {
            // Reduce slice view to exclude "!"
            input = &input[1..];
            false
        } else {
            true
        };

        // Paths ending in / match only directory names (no filenames)
        let match_dir_only = if input.ends_with(b"/") {
            let len = input.len();
            input = &input[..len - 1];
            true
        } else {
            false
        };

        // Ignore initial slash
        if input.starts_with(b"/") {
            input = &input[1..];
        }

        if input.is_empty() || input == b"." || input == b".." || input.contains(&b'\0') {
            return Err(nix::Error::Sys(Errno::EINVAL));
        }

        Ok(Some(MatchPattern {
            pattern: input.to_vec(),
            match_positive,
            match_dir_only,
        }))
    }

    /// Create a `MatchPatternSlice` of the `MatchPattern` to give a view of the
    /// `MatchPattern` without copying its content.
    pub fn as_slice<'a>(&'a self) -> MatchPatternSlice<'a> {
        MatchPatternSlice {
            pattern: self.pattern.as_slice(),
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Dump the content of the `MatchPattern` to stdout.
    /// Intended for debugging purposes only.
    pub fn dump(&self) {
        match (self.match_positive, self.match_dir_only) {
            (true, true) => println!("{:#?}/", self.pattern),
            (true, false) => println!("{:#?}", self.pattern),
            (false, true) => println!("!{:#?}/", self.pattern),
            (false, false) => println!("!{:#?}", self.pattern),
        }
    }

    /// Convert a list of `MatchPattern` to bytes in order to write them to e.g.
    /// a file.
    pub fn to_bytes(patterns: &[MatchPattern]) -> Vec<u8> {
        let mut slices = Vec::new();
        for pattern in patterns {
            slices.push(pattern.as_slice());
        }

        MatchPatternSlice::to_bytes(&slices)
    }

    /// Invert the match type for this `MatchPattern`.
    pub fn invert(&mut self) {
        self.match_positive = !self.match_positive;
    }
}

#[derive(Clone)]
pub struct MatchPatternSlice<'a> {
    pattern: &'a [u8],
    match_positive: bool,
    match_dir_only: bool,
}

impl<'a> MatchPatternSlice<'a> {
    /// Returns the pattern before the first `/` encountered as `MatchPatternSlice`.
    /// If no slash is encountered, the `MatchPatternSlice` will be a copy of the
    /// original pattern.
    /// ```
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    /// let front = slice.get_front_pattern();
    /// /// ... will be the same as ...
    /// let front_pattern = MatchPattern::from_line(b"some")?.unwrap();
    /// let front_slice = front_pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_front_pattern(&'a self) -> MatchPatternSlice<'a> {
        let (front, _) = self.split_at_slash();
        MatchPatternSlice {
            pattern: front,
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Returns the pattern after the first encountered `/` as `MatchPatternSlice`.
    /// If no slash is encountered, the `MatchPatternSlice` will be empty.
    /// ```
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    /// let rest = slice.get_rest_pattern();
    /// /// ... will be the same as ...
    /// let rest_pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let rest_slice = rest_pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_rest_pattern(&'a self) -> MatchPatternSlice<'a> {
        let (_, rest) = self.split_at_slash();
        MatchPatternSlice {
            pattern: rest,
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Splits the `MatchPatternSlice` at the first slash encountered and returns the
    /// content before (front pattern) and after the slash (rest pattern),
    /// omitting the slash itself.
    /// Slices starting with `**/` are an exception to this, as the corresponding
    /// `MatchPattern` is intended to match multiple directories.
    /// These pattern slices therefore return a `*` as front pattern and the original
    /// pattern itself as rest pattern.
    fn split_at_slash(&'a self) -> (&'a [u8], &'a [u8]) {
        let pattern = if self.pattern.starts_with(b"./") {
            &self.pattern[2..]
        } else {
            self.pattern
        };

        let (mut front, mut rest) = match pattern.iter().position(|&c| c == b'/') {
            Some(ind) => {
                let (front, rest) = pattern.split_at(ind);
                (front, &rest[1..])
            }
            None => (pattern, &pattern[0..0]),
        };
        // '**' is treated such that it matches any directory
        if front == b"**" {
            front = b"*";
            rest = pattern;
        }

        (front, rest)
    }

    /// Convert a list of `MatchPatternSlice`s to bytes in order to write them to e.g.
    /// a file.
    pub fn to_bytes(patterns: &[MatchPatternSlice]) -> Vec<u8> {
        let mut buffer = Vec::new();
        for pattern in patterns {
            if !pattern.match_positive { buffer.push(b'!'); }
            buffer.extend_from_slice(&pattern.pattern);
            if pattern.match_dir_only { buffer.push(b'/'); }
            buffer.push(b'\n');
        }
        buffer
    }

    /// Match the given filename against this `MatchPatternSlice`.
    /// If the filename matches the pattern completely, `MatchType::Positive` or
    /// `MatchType::Negative` is returned, depending on whether the match pattern was
    /// declared as positive (no `!` prefix) or negative (`!` prefix).
    /// If the pattern matched only up to the first slash of the pattern,
    /// `MatchType::PartialPositive` or `MatchType::PartialNegative` is returned.
    /// If the pattern was postfixed by a trailing `/` a match is only valid if
    /// the parameter `is_dir` equals `true`.
    /// No match results in `MatchType::None`.
    pub fn matches_filename(&self, filename: &CStr, is_dir: bool) -> Result<MatchType, Error> {
        let mut res = MatchType::None;
        let (front, _) = self.split_at_slash();

        let front = CString::new(front).unwrap();
        let fnmatch_res = unsafe {
            let front_ptr = front.as_ptr() as *const libc::c_char;
            let filename_ptr = filename.as_ptr() as *const libc::c_char;
            fnmatch(front_ptr, filename_ptr, 0)
        };
        if fnmatch_res < 0 {
            bail!("error in fnmatch inside of MatchPattern");
        }
        if fnmatch_res == 0 {
            res = if self.match_positive {
                MatchType::PartialPositive
            } else {
                MatchType::PartialNegative
            };
        }

        let full = if self.pattern.starts_with(b"**/") {
            CString::new(&self.pattern[3..]).unwrap()
        } else {
            CString::new(&self.pattern[..]).unwrap()
        };
        let fnmatch_res = unsafe {
            let full_ptr = full.as_ptr() as *const libc::c_char;
            let filename_ptr = filename.as_ptr() as *const libc::c_char;
            fnmatch(full_ptr, filename_ptr, 0)
        };
        if fnmatch_res < 0 {
            bail!("error in fnmatch inside of MatchPattern");
        }
        if fnmatch_res == 0 {
            res = if self.match_positive {
                MatchType::Positive
            } else {
                MatchType::Negative
            };
        }

        if !is_dir && self.match_dir_only {
            res = MatchType::None;
        }

        if !is_dir && (res == MatchType::PartialPositive || res == MatchType::PartialNegative) {
            res = MatchType::None;
        }

        Ok(res)
    }

    /// Match the given filename against the set of `MatchPatternSlice`s.
    ///
    /// A positive match is intended to include the full subtree (unless another
    /// negative match excludes entries later).
    /// The `MatchType` together with an updated `MatchPatternSlice` list for passing
    /// to the matched child is returned.
    /// ```
    /// # use std::ffi::CString;
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let patterns = vec![
    ///     MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
    ///     MatchPattern::from_line(b"to_match/")?.unwrap()
    /// ];
    /// let mut slices = Vec::new();
    /// for pattern in &patterns {
    ///     slices.push(pattern.as_slice());
    /// }
    /// let filename = CString::new("some")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
    ///     &filename,
    ///     is_dir,
    ///     &slices
    /// )?;
    /// assert_eq!(match_type, MatchType::PartialPositive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    ///
    /// let filename = CString::new("to_match")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
    ///     &filename,
    ///     is_dir,
    ///     &slices
    /// )?;
    /// assert_eq!(match_type, MatchType::Positive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"**/*")?.unwrap();
    /// let slice = pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn match_filename_include(
        filename: &CStr,
        is_dir: bool,
        match_pattern: &'a [MatchPatternSlice<'a>],
    ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
        let mut child_pattern = Vec::new();
        let mut match_state = MatchType::None;

        for pattern in match_pattern {
            match pattern.matches_filename(filename, is_dir)? {
                MatchType::None => continue,
                MatchType::Positive => match_state = MatchType::Positive,
                MatchType::Negative => match_state = MatchType::Negative,
                MatchType::PartialPositive => {
                    if match_state != MatchType::Negative && match_state != MatchType::Positive {
                        match_state = MatchType::PartialPositive;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
                MatchType::PartialNegative => {
                    if match_state == MatchType::PartialPositive {
                        match_state = MatchType::PartialNegative;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
            }
        }

        Ok((match_state, child_pattern))
    }

    /// Match the given filename against the set of `MatchPatternSlice`s.
    ///
    /// A positive match is intended to exclude the full subtree, independent of
    /// matches deeper down the tree.
    /// The `MatchType` together with an updated `MatchPattern` list for passing
    /// to the matched child is returned.
    /// ```
    /// # use std::ffi::CString;
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let patterns = vec![
    ///     MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
    ///     MatchPattern::from_line(b"to_match/")?.unwrap()
    /// ];
    /// let mut slices = Vec::new();
    /// for pattern in &patterns {
    ///     slices.push(pattern.as_slice());
    /// }
    /// let filename = CString::new("some")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
    ///     &filename,
    ///     is_dir,
    ///     &slices,
    /// )?;
    /// assert_eq!(match_type, MatchType::PartialPositive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    ///
    /// let filename = CString::new("to_match")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
    ///     &filename,
    ///     is_dir,
    ///     &slices,
    /// )?;
    /// assert_eq!(match_type, MatchType::Positive);
    /// /// child pattern will be empty
    /// # Ok(())
    /// # }
    /// ```
    pub fn match_filename_exclude(
        filename: &CStr,
        is_dir: bool,
        match_pattern: &'a [MatchPatternSlice<'a>],
    ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
        let mut child_pattern = Vec::new();
        let mut match_state = MatchType::None;

        for pattern in match_pattern {
            match pattern.matches_filename(filename, is_dir)? {
                MatchType::None => {}
                MatchType::Positive => match_state = MatchType::Positive,
                MatchType::Negative => match_state = MatchType::Negative,
                match_type => {
                    if match_state != MatchType::Positive && match_state != MatchType::Negative {
                        match_state = match_type;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
            }
        }

        Ok((match_state, child_pattern))
    }
}
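Illustrative sketch, not part of the changeset: a worked example of the `from_line()` parsing rules documented above (leading `!` negates, trailing `/` restricts to directories, a leading `/` is ignored). It assumes placement next to the `MatchPattern` type; the pattern strings are made up.

fn match_pattern_from_line_example() -> Result<(), nix::Error> {
    // '!' and the trailing '/' are markers, not part of the stored pattern bytes.
    let pattern = MatchPattern::from_line(b"!/var/tmp/")?.unwrap();
    // Serializing it back reproduces the markers around the normalized pattern.
    assert_eq!(MatchPattern::to_bytes(&[pattern]), b"!var/tmp/\n".to_vec());

    // Comment lines yield no pattern at all.
    assert!(MatchPattern::from_line(b"# just a comment")?.is_none());
    Ok(())
}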
319	src/pxar/metadata.rs	(new file)

@@ -0,0 +1,319 @@
use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};

use anyhow::{bail, format_err, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use pxar::Metadata;

use proxmox::c_result;
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;

use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
use crate::tools::{acl, fs, xattr};

//
// utility functions
//

fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
    if err.is_errno(Errno::EOPNOTSUPP) {
        Ok(())
    } else {
        Err(err)
    }
}

fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
    if err.is_errno(Errno::EOPNOTSUPP) {
        *not_supp = true;
        Ok(())
    } else {
        Err(err)
    }
}

fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
    // restore mtime
    const UTIME_OMIT: i64 = (1 << 30) - 2;
    const NANOS_PER_SEC: i64 = 1_000_000_000;

    let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
    let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;

    let times: [libc::timespec; 2] = [
        libc::timespec {
            tv_sec: 0,
            tv_nsec: UTIME_OMIT,
        },
        libc::timespec {
            tv_sec: sec,
            tv_nsec: nsec,
        },
    ];

    times
}

//
// metadata application:
//

pub fn apply_at(
    flags: Flags,
    metadata: &Metadata,
    parent: RawFd,
    file_name: &CStr,
) -> Result<(), Error> {
    let fd = proxmox::tools::fd::Fd::openat(
        &unsafe { RawFdNum::from_raw_fd(parent) },
        file_name,
        OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
        Mode::empty(),
    )?;

    apply(flags, metadata, fd.as_raw_fd(), file_name)
}

pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

    if metadata.stat.flags != 0 {
        todo!("apply flags!");
    }

    unsafe {
        // UID and GID first, as this fails if we lose access anyway.
        c_result!(libc::chown(
            c_proc_path.as_ptr(),
            metadata.stat.uid,
            metadata.stat.gid
        ))
        .map(drop)
        .or_else(allow_notsupp)?;
    }

    let mut skip_xattrs = false;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    apply_acls(flags, &c_proc_path, metadata)?;
    apply_quota_project_id(flags, fd, metadata)?;

    // Finally mode and time. We may lose access with mode, but changing the mode also
    // affects times.
    if !metadata.is_symlink() {
        c_result!(unsafe {
            libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
        })
        .map(drop)
        .or_else(allow_notsupp)?;
    }

    let res = c_result!(unsafe {
        libc::utimensat(
            libc::AT_FDCWD,
            c_proc_path.as_ptr(),
            nsec_to_update_timespec(metadata.stat.mtime).as_ptr(),
            0,
        )
    });
    match res {
        Ok(_) => (),
        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
        Err(ref err) if err.is_errno(Errno::EPERM) => {
            println!(
                "failed to restore mtime attribute on {:?}: {}",
                file_name, err
            );
        }
        Err(err) => return Err(err.into()),
    }

    Ok(())
}

fn add_fcaps(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
        return Ok(());
    }
    let fcaps = match metadata.fcaps.as_ref() {
        Some(fcaps) => fcaps,
        None => return Ok(()),
    };

    c_result!(unsafe {
        libc::setxattr(
            c_proc_path,
            xattr::xattr_name_fcaps().as_ptr(),
            fcaps.data.as_ptr() as *const libc::c_void,
            fcaps.data.len(),
            0,
        )
    })
    .map(drop)
    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;

    Ok(())
}

fn apply_xattrs(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
        return Ok(());
    }

    for xattr in &metadata.xattrs {
        if *skip_xattrs {
            return Ok(());
        }

        if !xattr::is_valid_xattr_name(xattr.name()) {
            println!("skipping invalid xattr named {:?}", xattr.name());
            continue;
        }

        c_result!(unsafe {
            libc::setxattr(
                c_proc_path,
                xattr.name().as_ptr() as *const libc::c_char,
                xattr.value().as_ptr() as *const libc::c_void,
                xattr.value().len(),
                0,
            )
        })
        .map(drop)
        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
    }

    Ok(())
}

fn apply_acls(flags: Flags, c_proc_path: &CStr, metadata: &Metadata) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
        return Ok(());
    }

    let mut acl = acl::ACL::init(5)?;

    // acl type access:
    acl.add_entry_full(
        acl::ACL_USER_OBJ,
        None,
        acl::mode_user_to_acl_permissions(metadata.stat.mode),
    )?;

    acl.add_entry_full(
        acl::ACL_OTHER,
        None,
        acl::mode_other_to_acl_permissions(metadata.stat.mode),
    )?;

    match metadata.acl.group_obj.as_ref() {
        Some(group_obj) => {
            acl.add_entry_full(
                acl::ACL_MASK,
                None,
                acl::mode_group_to_acl_permissions(metadata.stat.mode),
            )?;
            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
        }
        None => {
            acl.add_entry_full(
                acl::ACL_GROUP_OBJ,
                None,
                acl::mode_group_to_acl_permissions(metadata.stat.mode),
            )?;
        }
    }

    for user in &metadata.acl.users {
        acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
    }

    for group in &metadata.acl.groups {
        acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
    }

    if !acl.is_valid() {
        bail!("Error while restoring ACL - ACL invalid");
    }

    acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
    drop(acl);

    // acl type default:
    if let Some(default) = metadata.acl.default.as_ref() {
        let mut acl = acl::ACL::init(5)?;

        acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;

        if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
            acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
        }

        for user in &metadata.acl.default_users {
            acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
        }

        for group in &metadata.acl.default_groups {
            acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
        }

        if !acl.is_valid() {
            bail!("Error while restoring ACL - ACL invalid");
        }

        acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
    }

    Ok(())
}

fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
        return Ok(());
    }

    let projid = match metadata.quota_project_id {
        Some(projid) => projid,
        None => return Ok(()),
    };

    let mut fsxattr = fs::FSXAttr::default();
    unsafe {
        fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
            format_err!(
                "error while getting fsxattr to restore quota project id - {}",
                err
            )
        })?;

        fsxattr.fsx_projid = projid.projid as u32;

        fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
            format_err!(
                "error while setting fsxattr to restore quota project id - {}",
                err
            )
        })?;
    }

    Ok(())
}
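Illustrative sketch, not part of the changeset: the mtime handling above is easy to sanity-check with a worked example (the input value is arbitrary and the sketch assumes it sits in the same module as `nsec_to_update_timespec()`).

fn update_timespec_example() {
    // A made-up mtime of 1_234_567_890_123_456_789 ns splits into whole seconds
    // and a nanosecond remainder; slot 0 leaves atime untouched via UTIME_OMIT,
    // slot 1 carries the restored mtime.
    let times = nsec_to_update_timespec(1_234_567_890_123_456_789);
    assert_eq!(times[1].tv_sec, 1_234_567_890);
    assert_eq!(times[1].tv_nsec, 123_456_789);
}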
@@ -47,33 +47,23 @@
 //! (user, group, acl, ...) because this is already defined by the
 //! linked `ENTRY`.
 
-mod binary_search_tree;
-pub use binary_search_tree::*;
-
-pub mod flags;
-pub use flags::*;
-
-mod format_definition;
-pub use format_definition::*;
-
-mod encoder;
-pub use encoder::*;
-
-mod sequential_decoder;
-pub use sequential_decoder::*;
-
-mod decoder;
-pub use decoder::*;
-
-mod match_pattern;
-pub use match_pattern::*;
-
-mod dir_stack;
-pub use dir_stack::*;
-
-pub mod fuse;
-pub use fuse::*;
-
 pub mod catalog;
+pub(crate) mod create;
+pub(crate) mod dir_stack;
+pub(crate) mod extract;
+pub(crate) mod metadata;
+pub mod fuse;
+pub(crate) mod tools;
 
-mod helper;
+mod flags;
+pub use flags::Flags;
+
+pub use create::create_archive;
+pub use extract::extract_archive;
+
+/// The format requires building sorted directory lookup tables in
+/// memory, so we restrict the number of allowed entries to limit
+/// maximum memory usage.
+pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
+
+pub use tools::{format_multi_line_entry, format_single_line_entry};
(file diff suppressed because it is too large)

203	src/pxar/tools.rs	(new file)

@@ -0,0 +1,203 @@
//! Some common methods used within the pxar code.

use std::convert::TryFrom;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;

use anyhow::{bail, format_err, Error};
use nix::sys::stat::Mode;

use pxar::{mode, Entry, EntryKind, Metadata};

/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
    let mode = meta.stat.get_permission_bits();
    u32::try_from(mode)
        .map_err(drop)
        .and_then(|mode| Mode::from_bits(mode).ok_or(()))
        .map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
}

/// Make sure path is relative and not '.' or '..'.
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
    assert_relative_path_do(Path::new(path))
}

/// Make sure path is a single component and not '.' or '..'.
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
    assert_single_path_component_do(Path::new(path))
}

fn assert_relative_path_do(path: &Path) -> Result<(), Error> {
    if !path.is_relative() {
        bail!("bad absolute file name in archive: {:?}", path);
    }

    Ok(())
}

fn assert_single_path_component_do(path: &Path) -> Result<(), Error> {
    assert_relative_path_do(path)?;

    let mut components = path.components();
    match components.next() {
        Some(std::path::Component::Normal(_)) => (),
        _ => bail!("invalid path component in archive: {:?}", path),
    }

    if components.next().is_some() {
        bail!(
            "invalid path with multiple components in archive: {:?}",
            path
        );
    }

    Ok(())
}

#[rustfmt::skip]
fn symbolic_mode(c: u64, special: bool, special_x: u8, special_no_x: u8) -> [u8; 3] {
    [
        if 0 != c & 4 { b'r' } else { b'-' },
        if 0 != c & 2 { b'w' } else { b'-' },
        match (c & 1, special) {
            (0, false) => b'-',
            (0, true) => special_no_x,
            (_, false) => b'x',
            (_, true) => special_x,
        }
    ]
}

fn mode_string(entry: &Entry) -> String {
    // https://www.gnu.org/software/coreutils/manual/html_node/What-information-is-listed.html#What-information-is-listed
    // additionally we use:
    //     file type: capital 'L' for hard links
    //     a second '+' after the mode to show non-acl xattr presence
    //
    // Trwxrwxrwx++ uid/gid size mtime filename [-> destination]

    let meta = entry.metadata();
    let mode = meta.stat.mode;
    let type_char = if entry.is_hardlink() {
        'L'
    } else {
        match mode & mode::IFMT {
            mode::IFREG => '-',
            mode::IFBLK => 'b',
            mode::IFCHR => 'c',
            mode::IFDIR => 'd',
            mode::IFLNK => 'l',
            mode::IFIFO => 'p',
            mode::IFSOCK => 's',
            _ => '?',
        }
    };

    let fmt_u = symbolic_mode((mode >> 6) & 7, 0 != mode & mode::ISUID, b's', b'S');
    let fmt_g = symbolic_mode((mode >> 3) & 7, 0 != mode & mode::ISGID, b's', b'S');
    let fmt_o = symbolic_mode(mode & 7, 0 != mode & mode::ISVTX, b't', b'T');

    let has_acls = if meta.acl.is_empty() { ' ' } else { '+' };

    let has_xattrs = if meta.xattrs.is_empty() { ' ' } else { '+' };

    format!(
        "{}{}{}{}{}{}",
        type_char,
        unsafe { std::str::from_utf8_unchecked(&fmt_u) },
        unsafe { std::str::from_utf8_unchecked(&fmt_g) },
        unsafe { std::str::from_utf8_unchecked(&fmt_o) },
        has_acls,
        has_xattrs,
    )
}

pub fn format_single_line_entry(entry: &Entry) -> String {
    use chrono::offset::TimeZone;

    let mode_string = mode_string(entry);

    let meta = entry.metadata();
    let mtime = meta.mtime_as_duration();
    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());

    let (size, link) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
        _ => ("0".to_string(), String::new()),
    };

    format!(
        "{} {:<13} {} {:>8} {:?}{}",
        mode_string,
        format!("{}/{}", meta.stat.uid, meta.stat.gid),
        mtime.format("%Y-%m-%d %H:%M:%S"),
        size,
        entry.path(),
        link,
    )
}

pub fn format_multi_line_entry(entry: &Entry) -> String {
    use chrono::offset::TimeZone;

    let mode_string = mode_string(entry);

    let meta = entry.metadata();
    let mtime = meta.mtime_as_duration();
    let mtime = chrono::Local.timestamp(mtime.as_secs() as i64, mtime.subsec_nanos());

    let (size, link, type_name) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
        EntryKind::Symlink(link) => (
            "0".to_string(),
            format!(" -> {:?}", link.as_os_str()),
            "symlink",
        ),
        EntryKind::Hardlink(link) => (
            "0".to_string(),
            format!(" -> {:?}", link.as_os_str()),
            "hardlink",
        ),
        EntryKind::Device(dev) => (
            format!("{},{}", dev.major, dev.minor),
            String::new(),
            if meta.stat.is_chardev() {
                "character special file"
            } else if meta.stat.is_blockdev() {
                "block special file"
            } else {
                "device"
            },
        ),
        EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
        EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
    };

    let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
        Ok(name) => std::borrow::Cow::Borrowed(name),
        Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
    };

    format!(
        "  File: {}{}\n  \
           Size: {:<13} Type: {}\n\
         Access: ({:o}/{})  Uid: {:<5} Gid: {:<5}\n\
         Modify: {}\n",
        file_name,
        link,
        size,
        type_name,
        meta.file_mode(),
        mode_string,
        meta.stat.uid,
        meta.stat.gid,
        mtime.format("%Y-%m-%d %H:%M:%S"),
    )
}
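Illustrative sketch, not part of the changeset: a worked example of `symbolic_mode()` for a hypothetical mode of 0o754 with no setuid/setgid/sticky bits; it assumes placement in the same module as the private function above.

fn symbolic_mode_example() {
    // user bits 7 -> "rwx", group bits 5 -> "r-x", other bits 4 -> "r--"
    let user  = symbolic_mode(0o7, false, b's', b'S');
    let group = symbolic_mode(0o5, false, b's', b'S');
    let other = symbolic_mode(0o4, false, b't', b'T');
    assert_eq!(&user,  b"rwx");
    assert_eq!(&group, b"r-x");
    assert_eq!(&other, b"r--");
}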
@@ -1,15 +1,14 @@
-use std::time::{SystemTime, UNIX_EPOCH};
 use std::path::PathBuf;
 use std::collections::HashMap;
 use std::sync::{RwLock};
 
 use anyhow::{format_err, Error};
 use lazy_static::lazy_static;
-use serde_json::{json, Value};
 
 use proxmox::tools::fs::{create_path, CreateOptions};
 
 use crate::api2::types::{RRDMode, RRDTimeFrameResolution};
+use crate::tools::epoch_now_f64;
 
 use super::*;
@@ -35,11 +34,6 @@ pub fn create_rrdb_dir() -> Result<(), Error> {
     Ok(())
 }
 
-fn now() -> Result<f64, Error> {
-    let time = SystemTime::now().duration_since(UNIX_EPOCH)?;
-    Ok(time.as_secs_f64())
-}
-
 pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<(), Error> {
 
     let mut path = PathBuf::from(PBS_RRD_BASEDIR);
@@ -48,7 +42,7 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
     std::fs::create_dir_all(path.parent().unwrap())?;
 
     let mut map = RRD_CACHE.write().unwrap();
-    let now = now()?;
+    let now = epoch_now_f64()?;
 
     if let Some(rrd) = map.get_mut(rel_path) {
         rrd.update(now, value);
@@ -71,41 +65,18 @@ pub fn update_value(rel_path: &str, value: f64, dst: DST, save: bool) -> Result<
     Ok(())
 }
 
-pub fn extract_data(
+pub fn extract_cached_data(
     base: &str,
-    items: &[&str],
+    name: &str,
+    now: f64,
     timeframe: RRDTimeFrameResolution,
     mode: RRDMode,
-) -> Result<Value, Error> {
-
-    let now = now()?;
+) -> Option<(u64, u64, Vec<Option<f64>>)> {
 
     let map = RRD_CACHE.read().unwrap();
 
-    let mut result = Vec::new();
-
-    for name in items.iter() {
-        let rrd = match map.get(&format!("{}/{}", base, name)) {
-            Some(rrd) => rrd,
-            None => continue,
-        };
-        let (start, reso, list) = rrd.extract_data(now, timeframe, mode);
-        let mut t = start;
-        for index in 0..RRD_DATA_ENTRIES {
-            if result.len() <= index {
-                if let Some(value) = list[index] {
-                    result.push(json!({ "time": t, *name: value }));
-                } else {
-                    result.push(json!({ "time": t }));
-                }
-            } else {
-                if let Some(value) = list[index] {
-                    result[index][name] = value.into();
-                }
-            }
-            t += reso;
-        }
-    }
-
-    Ok(result.into())
+    match map.get(&format!("{}/{}", base, name)) {
+        Some(rrd) => Some(rrd.extract_data(now, timeframe, mode)),
+        None => None,
+    }
 }
@@ -323,7 +323,7 @@ fn get_index(username: Option<String>, token: Option<String>, template: &Handleb
 
     if let Some(query_str) = parts.uri.query() {
         for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
-            if k == "debug" && v == "1" || v == "true" {
+            if k == "debug" && v != "0" && v != "false" {
                 debug = true;
             }
         }
@@ -213,6 +213,8 @@ pub fn upid_read_status(upid: &UPID) -> Result<String, Error> {
         Some(rest) => {
             if rest == "OK" {
                 status = String::from(rest);
+            } else if rest.starts_with("WARNINGS: ") {
+                status = String::from(rest);
             } else if rest.starts_with("ERROR: ") {
                 status = String::from(&rest[7..]);
             }
@@ -234,7 +236,7 @@ pub struct TaskListInfo {
     pub upid_str: String,
     /// Task `(endtime, status)` if already finished
     ///
-    /// The `status` ise iether `unknown`, `OK`, or `ERROR: ...`
+    /// The `status` is either `unknown`, `OK`, `WARN`, or `ERROR: ...`
     pub state: Option<(i64, String)>, // endtime, status
 }
 
@@ -385,6 +387,7 @@ impl std::fmt::Display for WorkerTask {
 struct WorkerTaskData {
     logger: FileLogger,
     progress: f64, // 0..1
+    warn_count: u64,
     pub abort_listeners: Vec<oneshot::Sender<()>>,
 }
 
@@ -424,6 +427,7 @@ impl WorkerTask {
             data: Mutex::new(WorkerTaskData {
                 logger,
                 progress: 0.0,
+                warn_count: 0,
                 abort_listeners: vec![],
             }),
         });
@@ -507,8 +511,11 @@ impl WorkerTask {
     /// Log task result, remove task from running list
     pub fn log_result(&self, result: &Result<(), Error>) {
 
+        let warn_count = self.data.lock().unwrap().warn_count;
         if let Err(err) = result {
             self.log(&format!("TASK ERROR: {}", err));
+        } else if warn_count > 0 {
+            self.log(format!("TASK WARNINGS: {}", warn_count));
         } else {
             self.log("TASK OK");
         }
@@ -524,6 +531,13 @@ impl WorkerTask {
         data.logger.log(msg);
     }
 
+    /// Log a message as warning.
+    pub fn warn<S: AsRef<str>>(&self, msg: S) {
+        let mut data = self.data.lock().unwrap();
+        data.logger.log(format!("WARN: {}", msg.as_ref()));
+        data.warn_count += 1;
+    }
+
     /// Set progress indicator
     pub fn progress(&self, progress: f64) {
         if progress >= 0.0 && progress <= 1.0 {
82	src/tools.rs
@@ -5,11 +5,11 @@ use std::any::Any;
 use std::collections::HashMap;
 use std::hash::BuildHasher;
 use std::fs::{File, OpenOptions};
-use std::io::ErrorKind;
-use std::io::Read;
+use std::io::{self, BufRead, ErrorKind, Read};
 use std::os::unix::io::{AsRawFd, RawFd};
 use std::path::Path;
 use std::time::Duration;
+use std::time::{SystemTime, SystemTimeError, UNIX_EPOCH};
 
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -31,7 +31,9 @@ pub mod lru_cache;
 pub mod runtime;
 pub mod ticket;
 pub mod timer;
+pub mod statistics;
 pub mod systemd;
+pub mod nom;
 
 mod wrapped_reader_stream;
 pub use wrapped_reader_stream::*;
@@ -480,7 +482,7 @@ pub fn normalize_uri_path(path: &str) -> Result<(String, Vec<&str>), Error> {
 /// is considered successful.
 pub fn command_output(
     output: std::process::Output,
-    exit_code_check: Option<fn(i32) -> bool>
+    exit_code_check: Option<fn(i32) -> bool>,
 ) -> Result<String, Error> {
 
     if !output.status.success() {
@@ -507,6 +509,19 @@ pub fn command_output(
     Ok(output)
 }
 
+pub fn run_command(
+    mut command: std::process::Command,
+    exit_code_check: Option<fn(i32) -> bool>,
+) -> Result<String, Error> {
+
+    let output = command.output()
+        .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
+
+    let output = crate::tools::command_output(output, exit_code_check)
+        .map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
+
+    Ok(output)
+}
+
 pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
     use nix::fcntl::{fcntl, FdFlag, F_GETFD, F_SETFD};
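Illustrative sketch, not part of the changeset: how the new `run_command()` helper might be called from inside `src/tools.rs`. The program, argument, and accepted exit codes are made up.

fn accept_0_or_1(code: i32) -> bool {
    code == 0 || code == 1
}

fn run_command_example() -> Result<(), anyhow::Error> {
    // Run an external program, treat exit code 0 or 1 as success, get stdout.
    let mut command = std::process::Command::new("ls");
    command.arg("-l");
    let stdout = run_command(command, Some(accept_0_or_1 as fn(i32) -> bool))?;
    println!("{}", stdout);
    Ok(())
}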
@@ -538,12 +553,27 @@ pub fn fail_on_shutdown() -> Result<(), Error> {
     Ok(())
 }
 
-// wrap nix::unistd::pipe2 + O_CLOEXEC into something returning guarded file descriptors
+/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file
+/// descriptors.
 pub fn pipe() -> Result<(Fd, Fd), Error> {
     let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?;
     Ok((Fd(pin), Fd(pout)))
 }
 
+/// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
+/// descriptors.
+pub fn socketpair() -> Result<(Fd, Fd), Error> {
+    use nix::sys::socket;
+    let (pa, pb) = socket::socketpair(
+        socket::AddressFamily::Unix,
+        socket::SockType::Stream,
+        None,
+        socket::SockFlag::SOCK_CLOEXEC,
+    )?;
+    Ok((Fd(pa), Fd(pb)))
+}
+
+
 /// An easy way to convert types to Any
 ///
 /// Mostly useful to downcast trait objects (see RpcEnvironment).
@@ -572,3 +602,47 @@ pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f a
     .add(b'?')
     .add(b'{')
     .add(b'}');
+
+/// Get an iterator over lines of a file, skipping empty lines and comments (lines starting with a
+/// `#`).
+pub fn file_get_non_comment_lines<P: AsRef<Path>>(
+    path: P,
+) -> Result<impl Iterator<Item = io::Result<String>>, Error> {
+    let path = path.as_ref();
+
+    Ok(io::BufReader::new(
+        File::open(path).map_err(|err| format_err!("error opening {:?}: {}", path, err))?,
+    )
+    .lines()
+    .filter_map(|line| match line {
+        Ok(line) => {
+            let line = line.trim();
+            if line.is_empty() || line.starts_with('#') {
+                None
+            } else {
+                Some(Ok(line.to_string()))
+            }
+        }
+        Err(err) => Some(Err(err)),
+    }))
+}
+
+pub fn epoch_now() -> Result<Duration, SystemTimeError> {
+    SystemTime::now().duration_since(UNIX_EPOCH)
+}
+
+pub fn epoch_now_f64() -> Result<f64, SystemTimeError> {
+    Ok(epoch_now()?.as_secs_f64())
+}
+
+pub fn epoch_now_u64() -> Result<u64, SystemTimeError> {
+    Ok(epoch_now()?.as_secs())
+}
+
+pub fn setup_safe_path_env() {
+    std::env::set_var("PATH", "/sbin:/bin:/usr/sbin:/usr/bin");
+    // Make %ENV safer - as suggested by https://perldoc.perl.org/perlsec.html
+    for name in &["IFS", "CDPATH", "ENV", "BASH_ENV"] {
+        std::env::remove_var(name);
+    }
+}
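A short usage sketch for the helpers added above; the function name and the file path are only examples, not part of this change:

fn dump_config_lines() -> Result<(), anyhow::Error> {
    // timestamp each non-empty, non-comment line of an example file
    let now = crate::tools::epoch_now_f64()?;
    for line in crate::tools::file_get_non_comment_lines("/etc/some.conf")? {
        println!("{}: {}", now, line?);
    }
    Ok(())
}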
@@ -12,6 +12,7 @@ use std::ptr;

 use libc::{c_char, c_int, c_uint, c_void};
 use nix::errno::Errno;
+use nix::NixPath;

 // from: acl/include/acl.h
 pub const ACL_UNDEFINED_ID: u32 = 0xffffffff;
@@ -100,14 +101,11 @@ impl ACL {
         Ok(ACL { ptr })
     }

-    pub fn set_file<P: AsRef<Path>>(&self, path: P, acl_type: ACLType) -> Result<(), nix::errno::Errno> {
-        let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
-        let res = unsafe { acl_set_file(path_cstr.as_ptr(), acl_type, self.ptr) };
-        if res < 0 {
-            return Err(Errno::last());
-        }
-
-        Ok(())
+    pub fn set_file<P: NixPath + ?Sized>(&self, path: &P, acl_type: ACLType) -> nix::Result<()> {
+        path.with_nix_path(|path| {
+            Errno::result(unsafe { acl_set_file(path.as_ptr(), acl_type, self.ptr) })
+        })?
+        .map(drop)
     }

     pub fn get_fd(fd: RawFd) -> Result<ACL, nix::errno::Errno> {
@@ -2,12 +2,14 @@

 use std::ffi::CString;
 use std::future::Future;
-use std::os::raw::{c_char, c_int};
+use std::io::{Read, Write};
+use std::os::raw::{c_char, c_uchar, c_int};
 use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
 use std::os::unix::ffi::OsStrExt;
 use std::panic::UnwindSafe;
 use std::pin::Pin;
 use std::task::{Context, Poll};
+use std::path::PathBuf;

 use anyhow::{bail, format_err, Error};

@@ -16,6 +18,11 @@ use proxmox::tools::io::{ReadExt, WriteExt};
 use crate::server;
 use crate::tools::{fd_change_cloexec, self};

+#[link(name = "systemd")]
+extern "C" {
+    fn sd_journal_stream_fd(identifier: *const c_uchar, priority: c_int, level_prefix: c_int) -> c_int;
+}
+
 // Unfortunately FnBox is nightly-only and Box<FnOnce> is unusable, so just use Box<Fn>...
 pub type BoxedStoreFunc = Box<dyn FnMut() -> Result<String, Error> + UnwindSafe + Send>;

@@ -31,6 +38,7 @@ pub trait Reloadable: Sized {
 #[derive(Default)]
 pub struct Reloader {
     pre_exec: Vec<PreExecEntry>,
+    self_exe: PathBuf,
 }

 // Currently we only need environment variables for storage, but in theory we could also add
@@ -41,10 +49,13 @@ struct PreExecEntry {
 }

 impl Reloader {
-    pub fn new() -> Self {
-        Self {
+    pub fn new() -> Result<Self, Error> {
+        Ok(Self {
             pre_exec: Vec::new(),
-        }
+
+            // Get the path to our executable as PathBuf
+            self_exe: std::fs::read_link("/proc/self/exe")?,
+        })
     }

     /// Restore an object from an environment variable of the given name, or, if none exists, uses
@@ -78,13 +89,6 @@ impl Reloader {
     }

     pub fn fork_restart(self) -> Result<(), Error> {
-        // Get the path to our executable as CString
-        let exe = CString::new(
-            std::fs::read_link("/proc/self/exe")?
-                .into_os_string()
-                .as_bytes()
-        )?;
-
         // Get our parameters as Vec<CString>
         let args = std::env::args_os();
         let mut new_args = Vec::with_capacity(args.len());
@@ -93,7 +97,7 @@ impl Reloader {
         }

         // Synchronisation pipe:
-        let (pin, pout) = super::pipe()?;
+        let (pold, pnew) = super::socketpair()?;

         // Start ourselves in the background:
         use nix::unistd::{fork, ForkResult};
@@ -102,30 +106,60 @@ impl Reloader {
                 // Double fork so systemd can supervise us without nagging...
                 match fork() {
                     Ok(ForkResult::Child) => {
-                        std::mem::drop(pin);
+                        std::mem::drop(pold);
                         // At this point we call pre-exec helpers. We must be certain that if they fail for
                         // whatever reason we can still call `_exit()`, so use catch_unwind.
                         match std::panic::catch_unwind(move || {
-                            let mut pout = unsafe {
-                                std::fs::File::from_raw_fd(pout.into_raw_fd())
+                            let mut pnew = unsafe {
+                                std::fs::File::from_raw_fd(pnew.into_raw_fd())
                             };
                             let pid = nix::unistd::Pid::this();
-                            if let Err(e) = unsafe { pout.write_host_value(pid.as_raw()) } {
+                            if let Err(e) = unsafe { pnew.write_host_value(pid.as_raw()) } {
                                 log::error!("failed to send new server PID to parent: {}", e);
                                 unsafe {
                                     libc::_exit(-1);
                                 }
                             }
-                            std::mem::drop(pout);
-                            self.do_exec(exe, new_args)
+                            let mut ok = [0u8];
+                            if let Err(e) = pnew.read_exact(&mut ok) {
+                                log::error!("parent vanished before notifying systemd: {}", e);
+                                unsafe {
+                                    libc::_exit(-1);
+                                }
+                            }
+                            assert_eq!(ok[0], 1, "reload handshake should have sent a 1 byte");
+
+                            std::mem::drop(pnew);
+
+                            // Try to reopen STDOUT/STDERR journald streams to get correct PID in logs
+                            let ident = CString::new(self.self_exe.file_name().unwrap().as_bytes()).unwrap();
+                            let ident = ident.as_bytes();
+                            let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_INFO, 1) };
+                            if fd >= 0 && fd != 1 {
+                                let fd = proxmox::tools::fd::Fd(fd); // add drop handler
+                                nix::unistd::dup2(fd.as_raw_fd(), 1)?;
+                            } else {
+                                log::error!("failed to update STDOUT journal redirection ({})", fd);
+                            }
+                            let fd = unsafe { sd_journal_stream_fd(ident.as_ptr(), libc::LOG_ERR, 1) };
+                            if fd >= 0 && fd != 2 {
+                                let fd = proxmox::tools::fd::Fd(fd); // add drop handler
+                                nix::unistd::dup2(fd.as_raw_fd(), 2)?;
+                            } else {
+                                log::error!("failed to update STDERR journal redirection ({})", fd);
+                            }
+
+                            self.do_reexec(new_args)
                         })
                         {
-                            Ok(_) => eprintln!("do_exec returned unexpectedly!"),
+                            Ok(Ok(())) => eprintln!("do_reexec returned!"),
+                            Ok(Err(err)) => eprintln!("do_reexec failed: {}", err),
                             Err(_) => eprintln!("panic in re-exec"),
                         }
                     }
                     Ok(ForkResult::Parent { child }) => {
-                        std::mem::drop((pin, pout));
+                        std::mem::drop((pold, pnew));
                         log::debug!("forked off a new server (second pid: {})", child);
                     }
                     Err(e) => log::error!("fork() failed, restart delayed: {}", e),
@@ -137,11 +171,11 @@ impl Reloader {
             }
             Ok(ForkResult::Parent { child }) => {
                 log::debug!("forked off a new server (first pid: {}), waiting for 2nd pid", child);
-                std::mem::drop(pout);
-                let mut pin = unsafe {
-                    std::fs::File::from_raw_fd(pin.into_raw_fd())
+                std::mem::drop(pnew);
+                let mut pold = unsafe {
+                    std::fs::File::from_raw_fd(pold.into_raw_fd())
                 };
-                let child = nix::unistd::Pid::from_raw(match unsafe { pin.read_le_value() } {
+                let child = nix::unistd::Pid::from_raw(match unsafe { pold.read_le_value() } {
                     Ok(v) => v,
                     Err(e) => {
                         log::error!("failed to receive pid of double-forked child process: {}", e);
@@ -153,6 +187,12 @@ impl Reloader {
                 if let Err(e) = systemd_notify(SystemdNotify::MainPid(child)) {
                     log::error!("failed to notify systemd about the new main pid: {}", e);
                 }
+
+                // notify child that it is now the new main process:
+                if let Err(e) = pold.write_all(&[1u8]) {
+                    log::error!("child vanished during reload: {}", e);
+                }
+
                 Ok(())
             }
             Err(e) => {
@@ -162,12 +202,13 @@ impl Reloader {
         }
     }

-    fn do_exec(self, exe: CString, args: Vec<CString>) -> Result<(), Error> {
+    fn do_reexec(self, args: Vec<CString>) -> Result<(), Error> {
+        let exe = CString::new(self.self_exe.as_os_str().as_bytes())?;
         self.pre_exec()?;
         nix::unistd::setsid()?;
         let args: Vec<&std::ffi::CStr> = args.iter().map(|s| s.as_ref()).collect();
         nix::unistd::execvp(&exe, &args)?;
-        Ok(())
+        panic!("exec misbehaved");
     }
 }

@@ -223,7 +264,7 @@ where
     F: FnOnce(tokio::net::TcpListener, NotifyReady) -> Result<S, Error>,
     S: Future<Output = ()>,
 {
-    let mut reloader = Reloader::new();
+    let mut reloader = Reloader::new()?;

     let listener: tokio::net::TcpListener = reloader.restore(
         "PROXMOX_BACKUP_LISTEN_FD",
@@ -4,44 +4,37 @@ use std::collections::{HashMap, HashSet};
 use std::ffi::{OsStr, OsString};
 use std::io;
 use std::os::unix::ffi::{OsStrExt, OsStringExt};
+use std::os::unix::fs::MetadataExt;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;

-use bitflags::bitflags;
-use anyhow::{format_err, Error};
+use anyhow::{bail, format_err, Error};
 use libc::dev_t;
 use once_cell::sync::OnceCell;

+use ::serde::{Deserialize, Serialize};
+
 use proxmox::sys::error::io_err_other;
 use proxmox::sys::linux::procfs::{MountInfo, mountinfo::Device};
 use proxmox::{io_bail, io_format_err};
+use proxmox::api::api;
+
+use crate::api2::types::{BLOCKDEVICE_NAME_REGEX, StorageStatus};

 mod zfs;
 pub use zfs::*;
+mod zpool_status;
+pub use zpool_status::*;
+mod zpool_list;
+pub use zpool_list::*;
 mod lvm;
 pub use lvm::*;
+mod smart;
+pub use smart::*;

-bitflags! {
-    /// Ways a device is being used.
-    pub struct DiskUse: u32 {
-        /// Currently mounted.
-        const MOUNTED = 0x0000_0001;
-
-        /// Currently used as member of a device-mapper device.
-        const DEVICE_MAPPER = 0x0000_0002;
-
-        /// Contains partitions.
-        const PARTITIONS = 0x0001_0000;
-
-        /// The disk has a partition type which belongs to an LVM PV.
-        const LVM = 0x0002_0000;
-
-        /// The disk has a partition type which belongs to a zpool.
-        const ZFS = 0x0004_0000;
-
-        /// The disk is used by ceph.
-        const CEPH = 0x0008_0000;
-    }
+lazy_static::lazy_static!{
+    static ref ISCSI_PATH_REGEX: regex::Regex =
+        regex::Regex::new(r"host[^/]*/session[^/]*").unwrap();
 }

 /// Disk management context.
@@ -70,8 +63,6 @@ impl DiskManage {

     /// Get a `Disk` from a device node (eg. `/dev/sda`).
     pub fn disk_by_node<P: AsRef<Path>>(self: Arc<Self>, devnode: P) -> io::Result<Disk> {
-        use std::os::unix::fs::MetadataExt;
-
         let devnode = devnode.as_ref();

         let meta = std::fs::metadata(devnode)?;
@@ -101,10 +92,14 @@ impl DiskManage {
         })
     }

+    /// Get a `Disk` for a name in `/sys/block/<name>`.
+    pub fn disk_by_name(self: Arc<Self>, name: &str) -> io::Result<Disk> {
+        let syspath = format!("/sys/block/{}", name);
+        self.disk_by_sys_path(&syspath)
+    }
+
     /// Gather information about mounted disks:
     fn mounted_devices(&self) -> Result<&HashSet<dev_t>, Error> {
-        use std::os::unix::fs::MetadataExt;
-
         self.mounted_devices
             .get_or_try_init(|| -> Result<_, Error> {
                 let mut mounted = HashSet::new();
@@ -264,12 +259,17 @@ impl Disk {
     }

     /// Convenience wrapper for reading a `/sys` file which contains just a simple `OsString`.
-    fn read_sys_os_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<OsString>> {
-        Ok(self.read_sys(path.as_ref())?.map(OsString::from_vec))
+    pub fn read_sys_os_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<OsString>> {
+        Ok(self.read_sys(path.as_ref())?.map(|mut v| {
+            if Some(&b'\n') == v.last() {
+                v.pop();
+            }
+            OsString::from_vec(v)
+        }))
     }

     /// Convenience wrapper for reading a `/sys` file which contains just a simple utf-8 string.
-    fn read_sys_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<String>> {
+    pub fn read_sys_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<String>> {
         Ok(match self.read_sys(path.as_ref())? {
             Some(data) => Some(String::from_utf8(data).map_err(io_err_other)?),
             None => None,
@@ -277,7 +277,7 @@ impl Disk {
     }

     /// Convenience wrapper for unsigned integer `/sys` values up to 64 bit.
-    fn read_sys_u64<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<u64>> {
+    pub fn read_sys_u64<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<u64>> {
         Ok(match self.read_sys_str(path)? {
             Some(data) => Some(data.trim().parse().map_err(io_err_other)?),
             None => None,
@@ -287,7 +287,7 @@ impl Disk {
     /// Get the disk's size in bytes.
     pub fn size(&self) -> io::Result<u64> {
         Ok(*self.info.size.get_or_try_init(|| {
-            self.read_sys_u64("size")?.ok_or_else(|| {
+            self.read_sys_u64("size")?.map(|s| s*512).ok_or_else(|| {
                 io_format_err!(
                     "failed to get disk size from {:?}",
                     self.syspath().join("size"),
@@ -400,8 +400,9 @@ impl Disk {
     /// Attempt to guess the disk type.
     pub fn guess_disk_type(&self) -> io::Result<DiskType> {
         Ok(match self.rotational()? {
+            Some(false) => DiskType::Ssd,
             Some(true) => DiskType::Hdd,
-            _ => match self.ata_rotation_rate_rpm() {
+            None => match self.ata_rotation_rate_rpm() {
                 Some(_) => DiskType::Hdd,
                 None => match self.bus() {
                     Some(bus) if bus == "usb" => DiskType::Usb,
@@ -430,17 +431,19 @@ impl Disk {
     /// another kernel driver like the device mapper.
     pub fn has_holders(&self) -> io::Result<bool> {
         Ok(*self
             .info
             .has_holders
             .get_or_try_init(|| -> io::Result<bool> {
-                for entry in std::fs::read_dir(self.syspath())? {
+                let mut subdir = self.syspath().to_owned();
+                subdir.push("holders");
+                for entry in std::fs::read_dir(subdir)? {
                     match entry?.file_name().as_bytes() {
                         b"." | b".." => (),
                         _ => return Ok(true),
                     }
                 }
                 Ok(false)
             })?)
     }

     /// Check if this disk is mounted.
@@ -473,10 +476,40 @@ impl Disk {
         }
         Ok(None)
     }
+
+    /// List device partitions
+    pub fn partitions(&self) -> Result<HashMap<u64, Disk>, Error> {
+
+        let sys_path = self.syspath();
+        let device = self.sysname().to_string_lossy().to_string();
+
+        let mut map = HashMap::new();
+
+        for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, sys_path)? {
+            let item = item?;
+            let name = match item.file_name().to_str() {
+                Ok(name) => name,
+                Err(_) => continue, // skip non utf8 entries
+            };
+
+            if !name.starts_with(&device) { continue; }
+
+            let mut part_path = sys_path.to_owned();
+            part_path.push(name);
+
+            let disk_part = self.manager.clone().disk_by_sys_path(&part_path)?;
+
+            if let Some(partition) = disk_part.read_sys_u64("partition")? {
+                map.insert(partition, disk_part);
+            }
+        }
+
+        Ok(map)
+    }
 }

 /// Returns disk usage information (total, used, avail)
-pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {
+pub fn disk_usage(path: &std::path::Path) -> Result<StorageStatus, Error> {

     let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

@@ -487,9 +520,16 @@ pub fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {

     let bsize = stat.f_bsize as u64;

-    Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
+    Ok(StorageStatus{
+        total: stat.f_blocks*bsize,
+        used: (stat.f_blocks-stat.f_bfree)*bsize,
+        avail: stat.f_bavail*bsize,
+    })
 }

+#[api()]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all="lowercase")]
 /// This is just a rough estimate for a "type" of disk.
 pub enum DiskType {
     /// We know nothing.
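For reference, a minimal sketch of how the reworked disk_usage helper can be consumed now that it returns a StorageStatus instead of a tuple; the function name and path are only examples, and it assumes the StorageStatus fields are accessible to the caller:

fn report_root_usage() -> Result<(), anyhow::Error> {
    let status = disk_usage(std::path::Path::new("/"))?;
    // StorageStatus carries total/used/avail in bytes
    println!("total: {} used: {} avail: {}", status.total, status.used, status.avail);
    Ok(())
}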
@@ -518,16 +558,10 @@ pub struct BlockDevStat {
 /// Use lsblk to read partition type uuids.
 pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error> {

-    const LSBLK_BIN_PATH: &str = "/usr/bin/lsblk";
-
-    let mut command = std::process::Command::new(LSBLK_BIN_PATH);
+    let mut command = std::process::Command::new("lsblk");
     command.args(&["--json", "-o", "path,parttype"]);

-    let output = command.output()
-        .map_err(|err| format_err!("failed to execute '{}' - {}", LSBLK_BIN_PATH, err))?;
-
-    let output = crate::tools::command_output(output, None)
-        .map_err(|err| format_err!("lsblk command failed: {}", err))?;
+    let output = crate::tools::run_command(command, None)?;

     let mut res: HashMap<String, Vec<String>> = HashMap::new();
@@ -553,3 +587,412 @@ pub fn get_partition_type_info() -> Result<HashMap<String, Vec<String>>, Error>
     }
     Ok(res)
 }
+
+#[api()]
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all="lowercase")]
+pub enum DiskUsageType {
+    /// Disk is not used (as far we can tell)
+    Unused,
+    /// Disk is mounted
+    Mounted,
+    /// Disk is used by LVM
+    LVM,
+    /// Disk is used by ZFS
+    ZFS,
+    /// Disk is used by device-mapper
+    DeviceMapper,
+    /// Disk has partitions
+    Partitions,
+}
+
+#[api(
+    properties: {
+        used: {
+            type: DiskUsageType,
+        },
+        "disk-type": {
+            type: DiskType,
+        },
+        status: {
+            type: SmartStatus,
+        }
+    }
+)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Information about how a Disk is used
+pub struct DiskUsageInfo {
+    /// Disk name (/sys/block/<name>)
+    pub name: String,
+    pub used: DiskUsageType,
+    pub disk_type: DiskType,
+    pub status: SmartStatus,
+    /// Disk wearout
+    pub wearout: Option<f64>,
+    /// Vendor
+    pub vendor: Option<String>,
+    /// Model
+    pub model: Option<String>,
+    /// WWN
+    pub wwn: Option<String>,
+    /// Disk size
+    pub size: u64,
+    /// Serisal number
+    pub serial: Option<String>,
+    /// Linux device path (/dev/xxx)
+    pub devpath: Option<String>,
+    /// Set if disk contains a GPT partition table
+    pub gpt: bool,
+    /// RPM
+    pub rpm: Option<u64>,
+}
+
+fn scan_partitions(
+    disk_manager: Arc<DiskManage>,
+    lvm_devices: &HashSet<u64>,
+    zfs_devices: &HashSet<u64>,
+    device: &str,
+) -> Result<DiskUsageType, Error> {
+
+    let mut sys_path = std::path::PathBuf::from("/sys/block");
+    sys_path.push(device);
+
+    let mut used = DiskUsageType::Unused;
+
+    let mut found_lvm = false;
+    let mut found_zfs = false;
+    let mut found_mountpoints = false;
+    let mut found_dm = false;
+    let mut found_partitions = false;
+
+    for item in crate::tools::fs::read_subdir(libc::AT_FDCWD, &sys_path)? {
+        let item = item?;
+        let name = match item.file_name().to_str() {
+            Ok(name) => name,
+            Err(_) => continue, // skip non utf8 entries
+        };
+        if !name.starts_with(device) { continue; }
+
+        found_partitions = true;
+
+        let mut part_path = sys_path.clone();
+        part_path.push(name);
+
+        let data = disk_manager.clone().disk_by_sys_path(&part_path)?;
+
+        let devnum = data.devnum()?;
+
+        if lvm_devices.contains(&devnum) {
+            found_lvm = true;
+        }
+
+        if data.is_mounted()? {
+            found_mountpoints = true;
+        }
+
+        if data.has_holders()? {
+            found_dm = true;
+        }
+
+        if zfs_devices.contains(&devnum) {
+            found_zfs = true;
+        }
+    }
+
+    if found_mountpoints {
+        used = DiskUsageType::Mounted;
+    } else if found_lvm {
+        used = DiskUsageType::LVM;
+    } else if found_zfs {
+        used = DiskUsageType::ZFS;
+    } else if found_dm {
+        used = DiskUsageType::DeviceMapper;
+    } else if found_partitions {
+        used = DiskUsageType::Partitions;
+    }
+
+    Ok(used)
+}
+
+/// Get disk usage information for a single disk
+pub fn get_disk_usage_info(
+    disk: &str,
+    no_smart: bool,
+) -> Result<DiskUsageInfo, Error> {
+    let mut filter = Vec::new();
+    filter.push(disk.to_string());
+    let mut map = get_disks(Some(filter), no_smart)?;
+    if let Some(info) = map.remove(disk) {
+        return Ok(info);
+    } else {
+        bail!("failed to get disk usage info - internal error"); // should not happen
+    }
+}
+
+/// Get disk usage information for multiple disks
+pub fn get_disks(
+    // filter - list of device names (without leading /dev)
+    disks: Option<Vec<String>>,
+    // do no include data from smartctl
+    no_smart: bool,
+) -> Result<HashMap<String, DiskUsageInfo>, Error> {
+
+    let disk_manager = DiskManage::new();
+
+    let partition_type_map = get_partition_type_info()?;
+
+    let zfs_devices = zfs_devices(&partition_type_map, None)?;
+
+    let lvm_devices = get_lvm_devices(&partition_type_map)?;
+
+    // fixme: ceph journals/volumes
+
+    let mut result = HashMap::new();
+
+    for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)? {
+        let item = item?;
+
+        let name = item.file_name().to_str().unwrap().to_string();
+
+        if let Some(ref disks) = disks {
+            if !disks.contains(&name) { continue; }
+        }
+
+        let sys_path = format!("/sys/block/{}", name);
+
+        if let Ok(target) = std::fs::read_link(&sys_path) {
+            if let Some(target) = target.to_str() {
+                if ISCSI_PATH_REGEX.is_match(target) { continue; } // skip iSCSI devices
+            }
+        }
+
+        let disk = disk_manager.clone().disk_by_sys_path(&sys_path)?;
+
+        let devnum = disk.devnum()?;
+
+        let size = match disk.size() {
+            Ok(size) => size,
+            Err(_) => continue, // skip devices with unreadable size
+        };
+
+        let disk_type = match disk.guess_disk_type() {
+            Ok(disk_type) => disk_type,
+            Err(_) => continue, // skip devices with undetectable type
+        };
+
+        let mut usage = DiskUsageType::Unused;
+
+        if lvm_devices.contains(&devnum) {
+            usage = DiskUsageType::LVM;
+        }
+
+        match disk.is_mounted() {
+            Ok(true) => usage = DiskUsageType::Mounted,
+            Ok(false) => {},
+            Err(_) => continue, // skip devices with undetectable mount status
+        }
+
+        if zfs_devices.contains(&devnum) {
+            usage = DiskUsageType::ZFS;
+        }
+
+        let vendor = disk.vendor().unwrap_or(None).
+            map(|s| s.to_string_lossy().trim().to_string());
+
+        let model = disk.model().map(|s| s.to_string_lossy().into_owned());
+
+        let serial = disk.serial().map(|s| s.to_string_lossy().into_owned());
+
+        let devpath = disk.device_path().map(|p| p.to_owned())
+            .map(|p| p.to_string_lossy().to_string());
+
+        let wwn = disk.wwn().map(|s| s.to_string_lossy().into_owned());
+
+        if usage != DiskUsageType::Mounted {
+            match scan_partitions(disk_manager.clone(), &lvm_devices, &zfs_devices, &name) {
+                Ok(part_usage) => {
+                    if part_usage != DiskUsageType::Unused {
+                        usage = part_usage;
+                    }
+                },
+                Err(_) => continue, // skip devices if scan_partitions fail
+            };
+        }
+
+        let mut status = SmartStatus::Unknown;
+        let mut wearout = None;
+
+        if !no_smart {
+            if let Ok(smart) = get_smart_data(&disk, false) {
+                status = smart.status;
+                wearout = smart.wearout;
+            }
+        }
+
+        let info = DiskUsageInfo {
+            name: name.clone(),
+            vendor, model, serial, devpath, size, wwn, disk_type,
+            status, wearout,
+            used: usage,
+            gpt: disk.has_gpt(),
+            rpm: disk.ata_rotation_rate_rpm(),
+        };
+
+        result.insert(name, info);
+    }
+
+    Ok(result)
+}
+
+/// Try to reload the partition table
+pub fn reread_partition_table(disk: &Disk) -> Result<(), Error> {
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+
+    let mut command = std::process::Command::new("blockdev");
+    command.arg("--rereadpt");
+    command.arg(disk_path);
+
+    crate::tools::run_command(command, None)?;
+
+    Ok(())
+}
+
+/// Initialize disk by writing a GPT partition table
+pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Error> {
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+
+    let uuid = uuid.unwrap_or("R"); // R .. random disk GUID
+
+    let mut command = std::process::Command::new("sgdisk");
+    command.arg(disk_path);
+    command.args(&["-U", uuid]);
+
+    crate::tools::run_command(command, None)?;
+
+    Ok(())
+}
+
+/// Create a single linux partition using the whole available space
+pub fn create_single_linux_partition(disk: &Disk) -> Result<Disk, Error> {
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+
+    let mut command = std::process::Command::new("sgdisk");
+    command.args(&["-n1", "-t1:8300"]);
+    command.arg(disk_path);
+
+    crate::tools::run_command(command, None)?;
+
+    let mut partitions = disk.partitions()?;
+
+    match partitions.remove(&1) {
+        Some(partition) => Ok(partition),
+        None => bail!("unable to lookup device partition"),
+    }
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all="lowercase")]
+pub enum FileSystemType {
+    /// Linux Ext4
+    Ext4,
+    /// XFS
+    Xfs,
+}
+
+impl std::fmt::Display for FileSystemType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let text = match self {
+            FileSystemType::Ext4 => "ext4",
+            FileSystemType::Xfs => "xfs",
+        };
+        write!(f, "{}", text)
+    }
+}
+
+impl std::str::FromStr for FileSystemType {
+    type Err = serde_json::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        use serde::de::IntoDeserializer;
+        Self::deserialize(s.into_deserializer())
+    }
+}
+
+/// Create a file system on a disk or disk partition
+pub fn create_file_system(disk: &Disk, fs_type: FileSystemType) -> Result<(), Error> {
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+
+    let fs_type = fs_type.to_string();
+
+    let mut command = std::process::Command::new("mkfs");
+    command.args(&["-t", &fs_type]);
+    command.arg(disk_path);
+
+    crate::tools::run_command(command, None)?;
+
+    Ok(())
+}
+
+/// Block device name completion helper
+pub fn complete_disk_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+    let mut list = Vec::new();
+
+    let dir = match crate::tools::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX) {
+        Ok(dir) => dir,
+        Err(_) => return list,
+    };
+
+    for item in dir {
+        if let Ok(item) = item {
+            let name = item.file_name().to_str().unwrap().to_string();
+            list.push(name);
+        }
+    }
+
+    list
+}
+
+/// Read the FS UUID (parse blkid output)
+///
+/// Note: Calling blkid is more reliable than using the udev ID_FS_UUID property.
+pub fn get_fs_uuid(disk: &Disk) -> Result<String, Error> {
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+
+    let mut command = std::process::Command::new("blkid");
+    command.args(&["-o", "export"]);
+    command.arg(disk_path);
+
+    let output = crate::tools::run_command(command, None)?;
+
+    for line in output.lines() {
+        if line.starts_with("UUID=") {
+            return Ok(line[5..].to_string());
+        }
+    }
+
+    bail!("get_fs_uuid failed - missing UUID");
+}
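A hedged sketch of how the partitioning helpers added above could be combined when preparing a blank disk; the flow, function name and error handling are illustrative, not prescribed by this change:

fn prepare_disk(disk: &Disk) -> Result<String, anyhow::Error> {
    // write a fresh GPT (random GUID), create one Linux partition, format it and return its FS UUID
    inititialize_gpt_disk(disk, None)?;
    reread_partition_table(disk)?;
    let partition = create_single_linux_partition(disk)?;
    create_file_system(&partition, FileSystemType::Ext4)?;
    get_fs_uuid(&partition)
}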
@@ -1,6 +1,7 @@
 use std::collections::{HashSet, HashMap};
+use std::os::unix::fs::MetadataExt;

-use anyhow::{format_err, Error};
+use anyhow::{Error};
 use serde_json::Value;
 use lazy_static::lazy_static;

@@ -12,29 +13,28 @@ lazy_static!{
     };
 }

-/// Get list of devices used by LVM (pvs).
+/// Get set of devices used by LVM (pvs).
+///
+/// The set is indexed by using the unix raw device number (dev_t is u64)
 pub fn get_lvm_devices(
     partition_type_map: &HashMap<String, Vec<String>>,
-) -> Result<HashSet<String>, Error> {
+) -> Result<HashSet<u64>, Error> {

-    const PVS_BIN_PATH: &str = "/sbin/pvs";
+    const PVS_BIN_PATH: &str = "pvs";

     let mut command = std::process::Command::new(PVS_BIN_PATH);
     command.args(&["--reportformat", "json", "--noheadings", "--readonly", "-o", "pv_name"]);

-    let output = command.output()
-        .map_err(|err| format_err!("failed to execute '{}' - {}", PVS_BIN_PATH, err))?;
-
-    let output = crate::tools::command_output(output, None)
-        .map_err(|err| format_err!("pvs command failed: {}", err))?;
+    let output = crate::tools::run_command(command, None)?;

-    let mut device_set: HashSet<String> = HashSet::new();
+    let mut device_set: HashSet<u64> = HashSet::new();

     for device_list in partition_type_map.iter()
         .filter_map(|(uuid, list)| if LVM_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
     {
         for device in device_list {
-            device_set.insert(device.clone());
+            let meta = std::fs::metadata(device)?;
+            device_set.insert(meta.rdev());
         }
     }

@@ -44,7 +44,8 @@ pub fn get_lvm_devices(
         Some(list) => {
             for info in list {
                 if let Some(pv_name) = info["pv_name"].as_str() {
-                    device_set.insert(pv_name.to_string());
+                    let meta = std::fs::metadata(pv_name)?;
+                    device_set.insert(meta.rdev());
                 }
             }
         }
src/tools/disks/smart.rs (new file, 212 lines)

@@ -0,0 +1,212 @@
+use anyhow::{bail, Error};
+use ::serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+
+#[api()]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all="lowercase")]
+/// SMART status
+pub enum SmartStatus {
+    /// Smart tests passed - everything is OK
+    Passed,
+    /// Smart tests failed - disk has problems
+    Failed,
+    /// Unknown status
+    Unknown,
+}
+
+#[api()]
+#[derive(Debug, Serialize, Deserialize)]
+/// SMART Attribute
+pub struct SmartAttribute {
+    /// Attribute name
+    name: String,
+    /// Attribute raw value
+    value: String,
+    // the rest of the values is available for ATA type
+    /// ATA Attribute ID
+    #[serde(skip_serializing_if="Option::is_none")]
+    id: Option<u64>,
+    /// ATA Flags
+    #[serde(skip_serializing_if="Option::is_none")]
+    flags: Option<String>,
+    /// ATA normalized value (0..100)
+    #[serde(skip_serializing_if="Option::is_none")]
+    normalized: Option<f64>,
+    /// ATA worst
+    #[serde(skip_serializing_if="Option::is_none")]
+    worst: Option<f64>,
+    /// ATA threshold
+    #[serde(skip_serializing_if="Option::is_none")]
+    threshold: Option<f64>,
+}
+
+#[api(
+    properties: {
+        status: {
+            type: SmartStatus,
+        },
+        wearout: {
+            description: "Wearout level.",
+            type: f64,
+            optional: true,
+        },
+        attributes: {
+            description: "SMART attributes.",
+            type: Array,
+            items: {
+                type: SmartAttribute,
+            },
+        },
+    },
+)]
+#[derive(Debug, Serialize, Deserialize)]
+/// Data from smartctl
+pub struct SmartData {
+    pub status: SmartStatus,
+    pub wearout: Option<f64>,
+    pub attributes: Vec<SmartAttribute>,
+}
+
+/// Read smartctl data for a disk (/dev/XXX).
+pub fn get_smart_data(
+    disk: &super::Disk,
+    health_only: bool,
+) -> Result<SmartData, Error> {
+
+    const SMARTCTL_BIN_PATH: &str = "smartctl";
+
+    let mut command = std::process::Command::new(SMARTCTL_BIN_PATH);
+    command.arg("-H");
+    if !health_only { command.args(&["-A", "-j"]); }
+
+    let disk_path = match disk.device_path() {
+        Some(path) => path,
+        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
+    };
+    command.arg(disk_path);
+
+    let output = crate::tools::run_command(command, None)?;
+
+    let output: serde_json::Value = output.parse()?;
+
+    let mut wearout = None;
+
+    let mut attributes = Vec::new();
+
+    // ATA devices
+    if let Some(list) = output["ata_smart_attributes"]["table"].as_array() {
+        let wearout_id = lookup_vendor_wearout_id(disk);
+        for item in list {
+            let id = match item["id"].as_u64() {
+                Some(id) => id,
+                None => continue, // skip attributes without id
+            };
+
+            let name = match item["name"].as_str() {
+                Some(name) => name.to_string(),
+                None => continue, // skip attributes without name
+            };
+
+            let raw_value = match item["raw"]["string"].as_str() {
+                Some(value) => value.to_string(),
+                None => continue, // skip attributes without raw value
+            };
+
+            let flags = match item["flags"]["string"].as_str() {
+                Some(flags) => flags.to_string(),
+                None => continue, // skip attributes without flags
+            };
+
+            let normalized = match item["value"].as_f64() {
+                Some(v) => v,
+                None => continue, // skip attributes without normalize value
+            };
+
+            let worst = match item["worst"].as_f64() {
+                Some(v) => v,
+                None => continue, // skip attributes without worst entry
+            };
+
+            let threshold = match item["thresh"].as_f64() {
+                Some(v) => v,
+                None => continue, // skip attributes without threshold entry
+            };
+
+            if id == wearout_id {
+                wearout = Some(normalized);
+            }
+
+            attributes.push(SmartAttribute {
+                name,
+                value: raw_value,
+                id: Some(id),
+                flags: Some(flags),
+                normalized: Some(normalized),
+                worst: Some(worst),
+                threshold: Some(threshold),
+            });
+        }
+    }
+
+    // NVME devices
+    if let Some(list) = output["nvme_smart_health_information_log"].as_object() {
+        for (name, value) in list {
+            if name == "percentage_used" {
+                // extract wearout from nvme text, allow for decimal values
+                if let Some(v) = value.as_f64() {
+                    if v <= 100.0 {
+                        wearout = Some(100.0 - v);
+                    }
+                }
+            }
+            if let Some(value) = value.as_f64() {
+                attributes.push(SmartAttribute {
+                    name: name.to_string(),
+                    value: value.to_string(),
+                    id: None,
+                    flags: None,
+                    normalized: None,
+                    worst: None,
+                    threshold: None,
+                });
+            }
+        }
+    }
+
+    let status = match output["smart_status"]["passed"].as_bool() {
+        None => SmartStatus::Unknown,
+        Some(true) => SmartStatus::Passed,
+        Some(false) => SmartStatus::Failed,
+    };
+
+    Ok(SmartData { status, wearout, attributes })
+}
+
+fn lookup_vendor_wearout_id(disk: &super::Disk) -> u64 {
+
+    static VENDOR_MAP: &[(&str, u64)] = &[
+        ("kingston", 231),
+        ("samsung", 177),
+        ("intel", 233),
+        ("sandisk", 233),
+        ("crucial", 202),
+    ];
+
+    let result = 233; // default
+    let model = match disk.model() {
+        Some(model) => model.to_string_lossy().to_lowercase(),
+        None => return result,
+    };
+
+    for (vendor, attr_id) in VENDOR_MAP {
+        if model.contains(vendor) {
+            return *attr_id;
+        }
+    }
+
+    result
+}
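For context, a small sketch of how the new smart module could be queried; the wrapper function and the device name "sda" are only examples:

fn print_wearout(manager: std::sync::Arc<DiskManage>) -> Result<(), anyhow::Error> {
    // look the disk up via the new disk_by_name() helper, then ask smartctl for health data only
    let disk = manager.disk_by_name("sda")?;
    let smart = get_smart_data(&disk, true)?;
    println!("status: {:?}, wearout: {:?}", smart.status, smart.wearout);
    Ok(())
}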
@@ -1,18 +1,10 @@
 use std::path::PathBuf;
 use std::collections::{HashMap, HashSet};
+use std::os::unix::fs::MetadataExt;

 use anyhow::{bail, Error};
 use lazy_static::lazy_static;

-use nom::{
-    error::VerboseError,
-    bytes::complete::{take_while, take_while1, take_till, take_till1},
-    combinator::{map_res, all_consuming, recognize},
-    sequence::{preceded, tuple},
-    character::complete::{space1, digit1, char, line_ending},
-    multi::{many0, many1},
-};
-
 use super::*;

 lazy_static!{
@@ -24,22 +16,6 @@ lazy_static!{
     };
 }

-type IResult<I, O, E = VerboseError<I>> = Result<(I, O), nom::Err<E>>;
-
-#[derive(Debug)]
-pub struct ZFSPoolUsage {
-    total: u64,
-    used: u64,
-    free: u64,
-}
-
-#[derive(Debug)]
-pub struct ZFSPoolStatus {
-    name: String,
-    usage: Option<ZFSPoolUsage>,
-    devices: Vec<String>,
-}
-
 /// Returns kernel IO-stats for zfs pools
 pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {

@@ -80,117 +56,22 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
     Ok(Some(stat))
 }

-/// Recognizes zero or more spaces and tabs (but not carage returns or line feeds)
-fn multispace0(i: &str) -> IResult<&str, &str> {
-    take_while(|c| c == ' ' || c == '\t')(i)
-}
-
-/// Recognizes one or more spaces and tabs (but not carage returns or line feeds)
-fn multispace1(i: &str) -> IResult<&str, &str> {
-    take_while1(|c| c == ' ' || c == '\t')(i)
-}
-
-fn parse_optional_u64(i: &str) -> IResult<&str, Option<u64>> {
-    if i.starts_with('-') {
-        Ok((&i[1..], None))
-    } else {
-        let (i, value) = map_res(recognize(digit1), str::parse)(i)?;
-        Ok((i, Some(value)))
-    }
-}
-
-fn parse_pool_device(i: &str) -> IResult<&str, String> {
-    let (i, (device, _, _rest)) = tuple((
-        preceded(multispace1, take_till1(|c| c == ' ' || c == '\t')),
-        multispace1,
-        preceded(take_till(|c| c == '\n'), char('\n')),
-    ))(i)?;
-
-    Ok((i, device.to_string()))
-}
-
-fn parse_pool_header(i: &str) -> IResult<&str, ZFSPoolStatus> {
-    let (i, (text, total, used, free, _, _eol)) = tuple((
-        take_while1(|c| char::is_alphanumeric(c)),
-        preceded(multispace1, parse_optional_u64),
-        preceded(multispace1, parse_optional_u64),
-        preceded(multispace1, parse_optional_u64),
-        preceded(space1, take_till(|c| c == '\n')),
-        line_ending,
-    ))(i)?;
-
-    let status = if let (Some(total), Some(used), Some(free)) = (total, used, free) {
-        ZFSPoolStatus {
-            name: text.into(),
-            usage: Some(ZFSPoolUsage { total, used, free }),
-            devices: Vec::new(),
-        }
-    } else {
-        ZFSPoolStatus {
-            name: text.into(), usage: None, devices: Vec::new(),
-        }
-    };
-
-    Ok((i, status))
-}
-
-fn parse_pool_status(i: &str) -> IResult<&str, ZFSPoolStatus> {
-
-    let (i, mut stat) = parse_pool_header(i)?;
-    let (i, devices) = many1(parse_pool_device)(i)?;
-
-    for device_path in devices.into_iter().filter(|n| n.starts_with("/dev/")) {
-        stat.devices.push(device_path);
-    }
-
-    let (i, _) = many0(tuple((multispace0, char('\n'))))(i)?; // skip empty lines
-
-    Ok((i, stat))
-}
-
-/// Parse zpool list outout
-///
-/// Note: This does not reveal any details on how the pool uses the devices, because
-/// the zpool list output format is not really defined...
-pub fn parse_zfs_list(i: &str) -> Result<Vec<ZFSPoolStatus>, Error> {
-    match all_consuming(many1(parse_pool_status))(i) {
-        Err(nom::Err::Error(err)) |
-        Err(nom::Err::Failure(err)) => {
-            bail!("unable to parse zfs list output - {}", nom::error::convert_error(i, err));
-        }
-        Err(err) => {
-            bail!("unable to parse calendar event: {}", err);
-        }
-        Ok((_, ce)) => Ok(ce),
-    }
-}
-
-/// List devices used by zfs (or a specific zfs pool)
+/// Get set of devices used by zfs (or a specific zfs pool)
+///
+/// The set is indexed by using the unix raw device number (dev_t is u64)
 pub fn zfs_devices(
     partition_type_map: &HashMap<String, Vec<String>>,
-    pool: Option<&OsStr>,
-) -> Result<HashSet<String>, Error> {
+    pool: Option<String>,
+) -> Result<HashSet<u64>, Error> {

-    // Note: zpools list output can include entries for 'special', 'cache' and 'logs'
-    // and maybe other things.
-
-    let mut command = std::process::Command::new("/sbin/zpool");
-    command.args(&["list", "-H", "-v", "-p", "-P"]);
-
-    if let Some(pool) = pool { command.arg(pool); }
-
-    let output = command.output()
-        .map_err(|err| format_err!("failed to execute '/sbin/zpool' - {}", err))?;
-
-    let output = crate::tools::command_output(output, None)
-        .map_err(|err| format_err!("zpool list command failed: {}", err))?;
-
-    let list = parse_zfs_list(&output)?;
+    let list = zpool_list(pool, true)?;

     let mut device_set = HashSet::new();
     for entry in list {
         for device in entry.devices {
-            device_set.insert(device.clone());
+            let meta = std::fs::metadata(device)?;
+            device_set.insert(meta.rdev());
         }
     }

@@ -198,9 +79,11 @@ pub fn zfs_devices(
         .filter_map(|(uuid, list)| if ZFS_UUIDS.contains(uuid.as_str()) { Some(list) } else { None })
     {
         for device in device_list {
-            device_set.insert(device.clone());
+            let meta = std::fs::metadata(device)?;
+            device_set.insert(meta.rdev());
         }
     }

     Ok(device_set)
 }
Some files were not shown because too many files have changed in this diff.