Compare commits
268 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e9764238df | |
| | 26f499b17b | |
| | cc7995ac40 | |
| | 43abba4b4f | |
| | 58f950c546 | |
| | c426e65893 | |
| | caea8d611f | |
| | 7d0754a6d2 | |
| | 5afa0755ea | |
| | 40b63186a6 | |
| | 8f6088c130 | |
| | 2162e2c15d | |
| | 0d5ab04a90 | |
| | 4059285649 | |
| | 2e079b8bf2 | |
| | 4ff2c9b832 | |
| | a8e2940ff3 | |
| | d5d5f2174e | |
| | 2311238450 | |
| | 2ea501ffdf | |
| | 4eb4e94918 | |
| | 817bcda848 | |
| | f6de2c7359 | |
| | 3f0b9c10ec | |
| | 2b66abbfab | |
| | 402c8861d8 | |
| | 3f683799a8 | |
| | 573bcd9a92 | |
| | 90779237ae | |
| | 1f82f9b7b5 | |
| | 19b5c3c43e | |
| | fe3e65c3ea | |
| | fdaab0df4e | |
| | b957aa81bd | |
| | 8ea00f6e49 | |
| | 4bd789b0fa | |
| | 2f050cf2ed | |
| | e22f4882e7 | |
| | c65bc99a41 | |
| | 355c055e81 | |
| | c2009e5309 | |
| | 23f74c190e | |
| | a6f8728339 | |
| | c1769a749c | |
| | facd9801cf | |
| | 21302088de | |
| | 8268c9d161 | |
| | b91b7d9ffd | |
| | 6e1f0c138f | |
| | 8567c0d29c | |
| | d33d8f4e6a | |
| | 5b1cfa01f1 | |
| | 05d18b907a | |
| | e44fe0c9f5 | |
| | 4cf0ced950 | |
| | 98425309b0 | |
| | 7b1e26699d | |
| | 676b0fde49 | |
| | 60f9a6ea8f | |
| | 1090fd4424 | |
| | 92c3fd2e22 | |
| | e3efaa1972 | |
| | 0cf2b6441e | |
| | d6d3b353be | |
| | a67f7d0a07 | |
| | c8137518fe | |
| | cbef49bf4f | |
| | 0b99e5aebc | |
| | 29c55e5fc4 | |
| | f386f512d0 | |
| | 3ddb14889a | |
| | 00c2327564 | |
| | d79926795a | |
| | c08fac4d69 | |
| | c40440092d | |
| | dc2ef2b54f | |
| | b28253d650 | |
| | f28cfb322a | |
| | 3bbe291c51 | |
| | 42d19fdf69 | |
| | 215968e033 | |
| | eddd1a1b9c | |
| | d2ce211899 | |
| | 1cb46c6f65 | |
| | 5d88c3a1c8 | |
| | 07fb504943 | |
| | f675c5e978 | |
| | 4e37d9ce67 | |
| | e303077132 | |
| | 6ef9bb59eb | |
| | eeaa2c212b | |
| | 4a3adc3de8 | |
| | abdb976340 | |
| | 3b62116ce6 | |
| | e005f953d9 | |
| | 1c090810f5 | |
| | e181d2f6da | |
| | 16021f6ab7 | |
| | ba694720fc | |
| | bde8e243cf | |
| | 3352ee5656 | |
| | b29cbc414d | |
| | 026dc1d11f | |
| | 9438aca6c9 | |
| | 547f0c97e4 | |
| | 177a2de992 | |
| | 0686b1f4db | |
| | 0727e56a06 | |
| | 2fd3d57490 | |
| | 3f851d1321 | |
| | 1aef491e24 | |
| | d0eccae37d | |
| | a34154d900 | |
| | c2cc32b4dd | |
| | 46405fa35d | |
| | 66af7f51bc | |
| | c72ccd4e33 | |
| | 902b2cc278 | |
| | 8ecd7c9c21 | |
| | 7f17f7444a | |
| | fb5a066500 | |
| | d19c96d507 | |
| | 929a13b357 | |
| | 36c65ee0b0 | |
| | 3378fd9fe5 | |
| | 58c51cf3d9 | |
| | 5509b199fb | |
| | bb59df9134 | |
| | 2564b0834f | |
| | 9321bbd1f5 | |
| | 4264e52220 | |
| | 6988b29bdc | |
| | 98c54240e6 | |
| | d30c192589 | |
| | 67908b47fa | |
| | ac7513e368 | |
| | fbbcd85839 | |
| | 7a6b549270 | |
| | 0196b9bf5b | |
| | 739a51459a | |
| | 195d7c90ce | |
| | 6f3146c08c | |
| | 4b12879289 | |
| | 20b3094bcb | |
| | df528ee6fa | |
| | 57e50fb906 | |
| | 3136792c95 | |
| | 3d571d5509 | |
| | 8e6e18b77c | |
| | 4d16badf6f | |
| | a609cf210e | |
| | 1498659b4e | |
| | 4482f3fe11 | |
| | 5d85847f91 | |
| | 476b4acadc | |
| | cf1bd08131 | |
| | ec8f042459 | |
| | 431cc7b185 | |
| | e693818afc | |
| | 3d68536fc2 | |
| | 26e78a2efb | |
| | 5444fa940b | |
| | d4f2397d4c | |
| | fab2413741 | |
| | 669c137fec | |
| | fc6047fcb1 | |
| | 3014088684 | |
| | 144006fade | |
| | b9cf6ee797 | |
| | cdde66d277 | |
| | 239e49f927 | |
| | ae66873ce9 | |
| | bda48e04da | |
| | ba97479848 | |
| | 6cad8ce4ce | |
| | 34020b929e | |
| | 33070956af | |
| | da84cc52f4 | |
| | 9825748e5e | |
| | 2179359f40 | |
| | 9bb161c881 | |
| | 297e600730 | |
| | ed7b3a7de2 | |
| | 0f358204bd | |
| | ca6124d5fa | |
| | 7eacdc765b | |
| | c443f58b09 | |
| | ab1092392f | |
| | 1e3d9b103d | |
| | 386990ba09 | |
| | bc853b028f | |
| | d406de299b | |
| | dfb31de8f0 | |
| | 7c3aa258f8 | |
| | 044055062c | |
| | 2b388026f8 | |
| | 707974fdb3 | |
| | 9069debcd8 | |
| | fa2bdc1309 | |
| | 8e40aa63c1 | |
| | d2522b2db6 | |
| | ce8e3de401 | |
| | 7fa2779559 | |
| | 042afd6e52 | |
| | ff30caeaf8 | |
| | 553cd12ba6 | |
| | de1e1a9d95 | |
| | 91960d6162 | |
| | 4c24a48eb3 | |
| | 484e761dab | |
| | 059b7a252e | |
| | 1278aeec36 | |
| | e53a4c4577 | |
| | 98ad58fbd2 | |
| | 98bb3b9016 | |
| | eb80aac288 | |
| | c26aad405f | |
| | f03a0e509e | |
| | 4c1e8855cc | |
| | 85a9a5b68c | |
| | f856e0774e | |
| | 43ba913977 | |
| | a720894ff0 | |
| | a95a3fb893 | |
| | 620911b426 | |
| | 5c264c8d80 | |
| | 8d78589969 | |
| | eed8a5ad79 | |
| | 538b9c1c27 | |
| | 55919bf141 | |
| | 456ad0c478 | |
| | c76c7f8303 | |
| | c48aa39f3b | |
| | 2d32fe2c04 | |
| | dc155e9bd7 | |
| | 4e14781aec | |
| | a595f0fee0 | |
| | add5861e8d | |
| | 1610c45a86 | |
| | b2387eaa45 | |
| | 96d65fbcd0 | |
| | 7cc3473a4e | |
| | 4856a21836 | |
| | a0153b02c9 | |
| | 04b0ca8b59 | |
| | 86e432b0b8 | |
| | f0ed6a218c | |
| | 709584719d | |
| | d43f86f3f3 | |
| | 997d7e19fc | |
| | c67b1fa72f | |
| | 268687ddf0 | |
| | 426c1e353b | |
| | 2888b27f4c | |
| | f5d00373f3 | |
| | 934f5bb8ac | |
| | 9857472211 | |
| | 013fa7bbcb | |
| | a8d7033cb2 | |
| | 04ad7bc436 | |
| | 77ebbefc1a | |
| | 750252ba2f | |
| | dc58194ebe | |
| | c6887a8a4d | |
| | 090decbe76 | |
| | c32186595e | |
| | 947f45252d | |
| | c94e1f655e | |
@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.2.1"
+version = "0.5.0"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -30,15 +30,20 @@ lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
 nix = "0.16"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
 pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-proxmox = { version = "0.1.38", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.1"
+proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+proxmox-fuse = "0.1.0"
+pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
 serde = { version = "1.0", features = ["derive"] }
TODO.rst (2 changes)
@@ -30,8 +30,6 @@ Chores:

 * move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/

 * recompute PXAR_ header types from strings: avoid using numbers from casync

 * remove pbs-* systemd timers and services on package purge
debian/changelog (vendored, 55 changes)
@@ -1,3 +1,58 @@
+rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
+
+  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
+
+  * ui: allow to Forget (delete) backup snapshots
+
+  * pxar: deal with files changing size during archiving
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200
+
+rust-proxmox-backup (0.4.0-1) unstable; urgency=medium
+
+  * change api for incremental backups mode
+
+  * zfs disk management gui
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200
+
+rust-proxmox-backup (0.3.0-1) unstable; urgency=medium
+
+  * support incremental backups mode
+
+  * new disk management
+
+  * single file restore for container backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Jun 2020 10:12:57 +0200
+
+rust-proxmox-backup (0.2.3-1) unstable; urgency=medium
+
+  * tools/systemd/time: fix compute_next_event for weekdays
+
+  * improve display of 'next run' for sync jobs
+
+  * fix csum calculation for images which do not have a 'chunk_size' aligned
+    size
+
+  * add parser for zpool list output
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 04 Jun 2020 10:39:06 +0200
+
+rust-proxmox-backup (0.2.2-1) unstable; urgency=medium
+
+  * proxmox-backup-client.rs: implement quiet flag
+
+  * client restore: don't add server file ending if already specified
+
+  * src/client/pull.rs: also download client.log.blob
+
+  * src/client/pull.rs: more verbose logging
+
+  * gui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 03 Jun 2020 10:37:12 +0200
+
 rust-proxmox-backup (0.2.1-1) unstable; urgency=medium

   * ui: move server RRD statistics to 'Server Status' panel
debian/postinst (new file, 28 lines, vendored)
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+case "$1" in
+    configure)
+        # modeled after dh_systemd_start output
+        systemctl --system daemon-reload >/dev/null || true
+        if [ -n "$2" ]; then
+            _dh_action=try-reload-or-restart
+        else
+            _dh_action=start
+        fi
+        deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+        ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+        ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+        ;;
+esac
+
+exit 0
debian/prerm (new file, 10 lines, vendored)
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -e
+
+#DEBHELPER#
+
+# modeled after dh_systemd_start output
+if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
+    deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
+fi
debian/rules (vendored, 6 changes)
@@ -37,9 +37,9 @@ override_dh_auto_install:
 	    PROXY_USER=backup \
 	    LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)

-override_dh_installinit:
-	dh_installinit
-	dh_installinit --name proxmox-backup-proxy
+override_dh_installsystemd:
+	# note: we start/try-reload-restart services manually in postinst
+	dh_installsystemd --no-start --no-restart-after-upgrade

 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
@@ -2,7 +2,7 @@
 Description=Proxmox Backup API Proxy Server
 Wants=network-online.target
 After=network.target
-Requires=proxmox-backup.service
+Wants=proxmox-backup.service
 After=proxmox-backup.service

 [Service]
@@ -49,7 +49,7 @@ fn hello_command(
 }

 #[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
 ///
 /// Returns: nothing
 fn quit_command() -> Result<(), Error> {
@@ -44,8 +44,8 @@ async fn run() -> Result<(), Error> {

     let mut bytes = 0;
     for _ in 0..100 {
-        let writer = DummyWriter { bytes: 0 };
-        let writer = client.speedtest(writer).await?;
+        let mut writer = DummyWriter { bytes: 0 };
+        client.speedtest(&mut writer).await?;
         println!("Received {} bytes", writer.bytes);
         bytes += writer.bytes;
     }
@@ -59,8 +59,7 @@ async fn run() -> Result<(), Error> {
     Ok(())
 }

-#[tokio::main]
-async fn main() {
+fn main() {
     if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
         eprintln!("ERROR: {}", err);
     }
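The second hunk above drops the `#[tokio::main]` attribute in favor of a plain `fn main()` that hands `run()` to the crate's own runtime entry point. A minimal sketch of what such a helper boils down to — an assumption for illustration, the real `tools::runtime::main` may differ (e.g. by sharing one runtime across helpers):

```rust
use anyhow::Error;

async fn run() -> Result<(), Error> {
    // application logic lives here
    Ok(())
}

fn main() {
    // Build the Tokio runtime explicitly instead of via #[tokio::main];
    // tokio 0.2's Runtime::block_on takes &mut self, hence `mut`.
    let mut rt = tokio::runtime::Runtime::new().expect("failed to create runtime");
    if let Err(err) = rt.block_on(run()) {
        eprintln!("ERROR: {}", err);
    }
}
```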
@@ -16,7 +16,7 @@ use std::io::Write;
 // tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros

 // # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)

 fn create_large_file(path: PathBuf) {
@@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {

     let backup_time = chrono::Utc::now();

-    let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;

     println!("start upload speed test");
     let res = client.upload_speedtest().await?;
@@ -5,9 +5,11 @@ pub mod config;
 pub mod node;
 pub mod reader;
 mod subscription;
+pub mod status;
 pub mod types;
 pub mod version;
 pub mod pull;
+mod helpers;

 use proxmox::api::router::SubdirMap;
 use proxmox::api::Router;
@@ -23,6 +25,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("nodes", &NODES_ROUTER),
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
+    ("status", &status::ROUTER),
     ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
@@ -2,9 +2,11 @@ use proxmox::api::router::{Router, SubdirMap};
 use proxmox::list_subdirs_api_method;

 pub mod datastore;
+pub mod sync;

 const SUBDIRS: SubdirMap = &[
-    ("datastore", &datastore::ROUTER)
+    ("datastore", &datastore::ROUTER),
+    ("sync", &sync::ROUTER)
 ];

 pub const ROUTER: Router = Router::new()
@@ -1,8 +1,8 @@
 use std::collections::{HashSet, HashMap};
 use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;

 use chrono::{TimeZone, Local};
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
@@ -13,17 +13,21 @@ use proxmox::api::{
     RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::try_block;
 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

+use pxar::accessor::aio::Accessor;
+use pxar::EntryKind;
+
 use crate::api2::types::*;
+use crate::api2::node::rrd::create_value_from_rrd;
 use crate::backup::*;
 use crate::config::datastore;
 use crate::config::cached_user_info::CachedUserInfo;

 use crate::server::WorkerTask;
-use crate::tools;
+use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
 use crate::config::acl::{
     PRIV_DATASTORE_AUDIT,
     PRIV_DATASTORE_MODIFY,
@@ -42,32 +46,45 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R

 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {

-    let mut path = store.base_path();
-    path.push(backup_dir.relative_path());
-    path.push("index.json.blob");
-
-    let raw_data = file_get_contents(&path)?;
-    let index_size = raw_data.len() as u64;
-    let blob = DataBlob::from_raw(raw_data)?;
-
-    let manifest = BackupManifest::try_from(blob)?;
+    let (manifest, index_size) = store.load_manifest(backup_dir)?;

     let mut result = Vec::new();
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
+            encrypted: item.encrypted,
             size: Some(item.size),
         });
     }

     result.push(BackupContent {
-        filename: "index.json.blob".to_string(),
+        filename: MANIFEST_BLOB_NAME.to_string(),
+        encrypted: Some(false),
         size: Some(index_size),
     });

     Ok(result)
 }

+fn get_all_snapshot_files(
+    store: &DataStore,
+    info: &BackupInfo,
+) -> Result<Vec<BackupContent>, Error> {
+    let mut files = read_backup_index(&store, &info.backup_dir)?;
+
+    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
+        acc.insert(item.filename.clone());
+        acc
+    });
+
+    for file in &info.files {
+        if file_set.contains(file) { continue; }
+        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+    }
+
+    Ok(files)
+}
+
 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

     let mut group_hash = HashMap::new();
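The new `get_all_snapshot_files()` merges two sources: files named in the manifest (which carry size and encryption info) and files merely present in the snapshot directory. A standalone sketch of that set-union, with plain strings standing in for `BackupContent` (a hypothetical simplification):

```rust
use std::collections::HashSet;

// Files from the manifest win; on-disk files are appended only if unknown.
fn merge_file_lists(mut from_manifest: Vec<String>, on_disk: &[String]) -> Vec<String> {
    let seen: HashSet<String> = from_manifest.iter().cloned().collect();
    for file in on_disk {
        if seen.contains(file) { continue; }
        from_manifest.push(file.clone());
    }
    from_manifest
}

fn main() {
    let merged = merge_file_lists(
        vec!["root.pxar.didx".into()],
        &["root.pxar.didx".into(), "client.log.blob".into()],
    );
    assert_eq!(merged, vec!["root.pxar.didx".to_string(), "client.log.blob".to_string()]);
}
```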
@@ -130,8 +147,8 @@ fn list_groups(
         let group = info.backup_dir.group();

         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+        let owner = datastore.get_owner(group)?;
         if !list_all {
-            let owner = datastore.get_owner(group)?;
             if owner != username { continue; }
         }

@@ -141,6 +158,7 @@ fn list_groups(
             last_backup: info.backup_dir.backup_time().timestamp(),
             backup_count: list.len() as u64,
             files: info.files.clone(),
+            owner: Some(owner),
         };
         groups.push(result_item);
     }
@@ -200,21 +218,9 @@ pub fn list_snapshot_files(
     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
     if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }

-    let mut files = read_backup_index(&datastore, &snapshot)?;
-
     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

-    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
-        acc.insert(item.filename.clone());
-        acc
-    });
-
-    for file in info.files {
-        if file_set.contains(&file) { continue; }
-        files.push(BackupContent { filename: file, size: None });
-    }
-
-    Ok(files)
+    get_all_snapshot_files(&datastore, &info)
 }

 #[api(
@@ -329,29 +335,34 @@ pub fn list_snapshots (
         }

         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+        let owner = datastore.get_owner(group)?;

         if !list_all {
-            let owner = datastore.get_owner(group)?;
             if owner != username { continue; }
         }

-        let mut result_item = SnapshotListItem {
+        let mut size = None;
+
+        let files = match get_all_snapshot_files(&datastore, &info) {
+            Ok(files) => {
+                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
+                files
+            },
+            Err(err) => {
+                eprintln!("error during snapshot file listing: '{}'", err);
+                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+            },
+        };
+
+        let result_item = SnapshotListItem {
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
-            files: info.files,
-            size: None,
+            files,
+            size,
+            owner: Some(owner),
         };

-        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
-            let mut backup_size = 0;
-            for item in index.iter() {
-                if let Some(item_size) = item.size {
-                    backup_size += item_size;
-                }
-            }
-            result_item.size = Some(backup_size);
-        }
-
         snapshots.push(result_item);
     }

@@ -379,25 +390,92 @@ pub fn status(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<StorageStatus, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
+    crate::tools::disks::disk_usage(&datastore.base_path())
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backup from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;

-    let base_path = datastore.base_path();
+    let worker_id;

-    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
+    let mut backup_dir = None;
+    let mut backup_group = None;

-    use nix::NixPath;
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+            let group = BackupGroup::new(backup_type, backup_id);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            worker_id = store.clone();
+        }
+        _ => bail!("parameters do not spefify a backup group or snapshot"),
+    }

-    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
-    nix::errno::Errno::result(res)?;
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

-    let bsize = stat.f_bsize as u64;
+    let upid_str = WorkerTask::new_thread(
+        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
+        {
+            let success = if let Some(backup_dir) = backup_dir {
+                verify_backup_dir(&datastore, &backup_dir, &worker)?
+            } else if let Some(backup_group) = backup_group {
+                verify_backup_group(&datastore, &backup_group, &worker)?
+            } else {
+                verify_all_backups(&datastore, &worker)?
+            };
+            if !success {
+                bail!("verfication failed - please check the log for details");
+            }
+            Ok(())
+        })?;

-    Ok(StorageStatus {
-        total: stat.f_blocks*bsize,
-        used: (stat.f_blocks-stat.f_bfree)*bsize,
-        avail: stat.f_bavail*bsize,
-    })
+    Ok(json!(upid_str))
 }

 #[macro_export]
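The new `verify` endpoint derives its scope from which of the three optional parameters are present. A standalone sketch of that dispatch, with plain strings in place of `BackupDir`/`BackupGroup` (hypothetical types, for illustration only):

```rust
#[derive(Debug)]
enum VerifyScope {
    Snapshot(String, String, i64), // one snapshot: type + id + time
    Group(String, String),         // all snapshots of a backup group
    Datastore,                     // everything in the datastore
}

fn verify_scope(
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
) -> Result<VerifyScope, String> {
    match (backup_type, backup_id, backup_time) {
        (Some(ty), Some(id), Some(time)) => Ok(VerifyScope::Snapshot(ty, id, time)),
        (Some(ty), Some(id), None) => Ok(VerifyScope::Group(ty, id)),
        (None, None, None) => Ok(VerifyScope::Datastore),
        _ => Err("parameters do not specify a backup group or snapshot".into()),
    }
}

fn main() {
    assert!(matches!(verify_scope(None, None, None), Ok(VerifyScope::Datastore)));
    assert!(verify_scope(Some("vm".into()), None, Some(0)).is_err());
}
```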
@@ -749,19 +827,22 @@ fn download_file(
         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
         if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }

-        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
-                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
         path.push(&file_name);

-        let file = tokio::fs::File::open(path)
+        let file = tokio::fs::File::open(&path)
             .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
             .await?;

         let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+            .map_err(move |err| {
+                eprintln!("error during streaming of '{:?}' - {}", &path, err);
+                err
+            });
         let body = Body::wrap_stream(payload);

         // fixme: set other headers ?
@@ -773,6 +854,118 @@
     }.boxed()
 }

+#[sortable]
+pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_file_decoded),
+    &ObjectSchema::new(
+        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn download_file_decoded(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(store)?;
+
+        let username = rpcenv.get_user().unwrap();
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+        let files = read_backup_index(&datastore, &backup_dir)?;
+        for file in files {
+            if file.filename == file_name && file.encrypted == Some(true) {
+                bail!("cannot decode '{}' - is encrypted", file_name);
+            }
+        }
+
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(&file_name);
+
+        let extension = file_name.rsplitn(2, '.').next().unwrap();
+
+        let body = match extension {
+            "didx" => {
+                let index = DynamicIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::new(reader)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "fidx" => {
+                let index = FixedIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "blob" => {
+                let file = std::fs::File::open(&path)
+                    .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
+
+                Body::wrap_stream(
+                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
+                        .map_err(move |err| {
+                            eprintln!("error during streaming of '{:?}' - {}", path, err);
+                            err
+                        })
+                )
+            },
+            extension => {
+                bail!("cannot download '{}' files", extension);
+            },
+        };
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
 #[sortable]
 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&upload_backup_log),
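`download_file_decoded` picks its decoder from the file name's last suffix via `rsplitn`. A tiny standalone demonstration of that idiom:

```rust
// rsplitn(2, '.') splits from the right at most once, so the first item
// is everything after the last dot -- the extension the match dispatches on.
fn extension(file_name: &str) -> &str {
    file_name.rsplitn(2, '.').next().unwrap() // safe: always yields >= 1 item
}

fn main() {
    assert_eq!(extension("root.pxar.didx"), "didx");
    assert_eq!(extension("disk.img.fidx"), "fidx");
    assert_eq!(extension("index.json.blob"), "blob");
}
```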
@@ -802,7 +995,7 @@ fn upload_backup_log(
         let store = tools::required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(store)?;

-        let file_name = "client.log.blob";
+        let file_name = CLIENT_LOG_BLOB_NAME;

         let backup_type = tools::required_string_param(&param, "backup-type")?;
         let backup_id = tools::required_string_param(&param, "backup-id")?;
@@ -843,6 +1036,212 @@
     }.boxed()
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            "filepath": {
+                description: "Base64 encoded path.",
+                type: String,
+            }
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get the entries of the given path of the catalog
+fn catalog(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    filepath: String,
+    _param: Value,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let username = rpcenv.get_user().unwrap();
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+    let mut path = datastore.base_path();
+    path.push(backup_dir.relative_path());
+    path.push(CATALOG_NAME);
+
+    let index = DynamicIndexReader::open(&path)
+        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let reader = BufferedDynamicReader::new(index, chunk_reader);
+
+    let mut catalog_reader = CatalogReader::new(reader);
+    let mut current = catalog_reader.root()?;
+    let mut components = vec![];
+
+    if filepath != "root" {
+        components = base64::decode(filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+        for component in components.split(|c| *c == '/' as u8) {
+            if let Some(entry) = catalog_reader.lookup(&current, component)? {
+                current = entry;
+            } else {
+                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
+            }
+        }
+    }
+
+    let mut res = Vec::new();
+
+    for direntry in catalog_reader.read_dir(&current)? {
+        let mut components = components.clone();
+        components.push('/' as u8);
+        components.extend(&direntry.name);
+        let path = base64::encode(components);
+        let text = String::from_utf8_lossy(&direntry.name);
+        let mut entry = json!({
+            "filepath": path,
+            "text": text,
+            "type": CatalogEntryType::from(&direntry.attr).to_string(),
+            "leaf": true,
+        });
+        match direntry.attr {
+            DirEntryAttribute::Directory { start: _ } => {
+                entry["leaf"] = false.into();
+            },
+            DirEntryAttribute::File { size, mtime } => {
+                entry["size"] = size.into();
+                entry["mtime"] = mtime.into();
+            },
+            _ => {},
+        }
+        res.push(entry);
+    }
+
+    Ok(res.into())
+}
+
+#[sortable]
+pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&pxar_file_download),
+    &ObjectSchema::new(
+        "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn pxar_file_download(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(&store)?;
+
+        let username = rpcenv.get_user().unwrap();
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
+        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+
+        let mut components = base64::decode(&filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+
+        let mut split = components.splitn(2, |c| *c == '/' as u8);
+        let pxar_name = split.next().unwrap();
+        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+
+        path.push(OsStr::from_bytes(&pxar_name));
+
+        let index = DynamicIndexReader::open(&path)
+            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+        let chunk_reader = LocalChunkReader::new(datastore, None);
+        let reader = BufferedDynamicReader::new(index, chunk_reader);
+        let archive_size = reader.archive_size();
+        let reader = LocalDynamicReadAt::new(reader);
+
+        let decoder = Accessor::new(reader, archive_size).await?;
+        let root = decoder.open_root().await?;
+        let file = root
+            .lookup(OsStr::from_bytes(file_path)).await?
+            .ok_or(format_err!("error opening '{:?}'", file_path))?;
+
+        let file = match file.kind() {
+            EntryKind::File { .. } => file,
+            EntryKind::Hardlink(_) => {
+                decoder.follow_hardlink(&file).await?
+            },
+            // TODO symlink
+            other => bail!("cannot download file of type {:?}", other),
+        };
+
+        let body = Body::wrap_stream(
+            AsyncReaderStream::new(file.contents().await?)
+                .map_err(move |err| {
+                    eprintln!("error during streaming of '{:?}' - {}", filepath, err);
+                    err
+                })
+        );
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
 #[api(
     input: {
         properties: {
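Both `catalog()` and `pxar_file_download()` exchange paths as base64-encoded byte strings with any leading `/` stripped before lookup. A standalone sketch of that round-trip, assuming the same `base64` crate the code above already uses:

```rust
fn encode_filepath(path: &[u8]) -> String {
    base64::encode(path)
}

// Decode and split into path components, mirroring the handling above.
fn decode_components(filepath: &str) -> Result<Vec<Vec<u8>>, base64::DecodeError> {
    let mut bytes = base64::decode(filepath)?;
    if bytes.first() == Some(&b'/') {
        bytes.remove(0); // the leading slash is not a component
    }
    Ok(bytes.split(|c| *c == b'/').map(|c| c.to_vec()).collect())
}

fn main() {
    let fp = encode_filepath(b"/etc/hostname");
    let parts = decode_components(&fp).unwrap();
    assert_eq!(parts, vec![b"etc".to_vec(), b"hostname".to_vec()]);
}
```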
@@ -869,14 +1268,13 @@ fn get_rrd_stats(
     _param: Value,
 ) -> Result<Value, Error> {

-    let rrd_dir = format!("datastore/{}", store);
-
-    crate::rrd::extract_data(
-        &rrd_dir,
+    create_value_from_rrd(
+        &format!("datastore/{}", store),
         &[
             "total", "used",
-            "read_ios", "read_bytes", "read_ticks",
-            "write_ios", "write_bytes", "write_ticks",
+            "read_ios", "read_bytes",
+            "write_ios", "write_bytes",
+            "io_ticks",
         ],
         timeframe,
         cf,
@@ -885,11 +1283,21 @@

 #[sortable]
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
+    (
+        "catalog",
+        &Router::new()
+            .get(&API_METHOD_CATALOG)
+    ),
     (
         "download",
         &Router::new()
             .download(&API_METHOD_DOWNLOAD_FILE)
     ),
+    (
+        "download-decoded",
+        &Router::new()
+            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
+    ),
     (
         "files",
         &Router::new()
@@ -911,6 +1319,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .post(&API_METHOD_PRUNE)
     ),
+    (
+        "pxar-file-download",
+        &Router::new()
+            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
+    ),
     (
         "rrd",
         &Router::new()
@@ -932,6 +1345,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
     ),
+    (
+        "verify",
+        &Router::new()
+            .post(&API_METHOD_VERIFY)
+    ),
 ];

 const DATASTORE_INFO_ROUTER: Router = Router::new()
src/api2/admin/sync.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use anyhow::{Error};
+use serde_json::Value;
+use std::collections::HashMap;
+
+use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
+use proxmox::api::router::SubdirMap;
+use proxmox::{list_subdirs_api_method, sortable};
+
+use crate::api2::types::*;
+use crate::api2::pull::{get_pull_parameters};
+use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
+use crate::server::{self, TaskListInfo, WorkerTask};
+use crate::tools::systemd::time::{
+    parse_calendar_event, compute_next_event};
+
+#[api(
+    input: {
+        properties: {},
+    },
+    returns: {
+        description: "List configured jobs and their status.",
+        type: Array,
+        items: { type: sync::SyncJobStatus },
+    },
+)]
+/// List all sync jobs
+pub fn list_sync_jobs(
+    _param: Value,
+    mut rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<SyncJobStatus>, Error> {
+
+    let (config, digest) = sync::config()?;
+
+    let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
+
+    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
+    let tasks = server::read_task_list()?;
+
+    for info in tasks.iter() {
+        let worker_id = match &info.upid.worker_id {
+            Some(id) => id,
+            _ => { continue; },
+        };
+        if let Some(last) = last_tasks.get(worker_id) {
+            if last.upid.starttime < info.upid.starttime {
+                last_tasks.insert(worker_id.to_string(), &info);
+            }
+        } else {
+            last_tasks.insert(worker_id.to_string(), &info);
+        }
+    }
+
+    for job in &mut list {
+        let mut last = 0;
+        if let Some(task) = last_tasks.get(&job.id) {
+            job.last_run_upid = Some(task.upid_str.clone());
+            if let Some((endtime, status)) = &task.state {
+                job.last_run_state = Some(String::from(status));
+                job.last_run_endtime = Some(*endtime);
+                last = *endtime;
+            }
+        }
+
+        job.next_run = (|| -> Option<i64> {
+            let schedule = job.schedule.as_ref()?;
+            let event = parse_calendar_event(&schedule).ok()?;
+            compute_next_event(&event, last, false).ok()
+        })();
+    }
+
+    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            }
+        }
+    }
+)]
+/// Runs the sync jobs manually.
+async fn run_sync_job(
+    id: String,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let (config, _digest) = sync::config()?;
+    let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
+
+    let username = rpcenv.get_user().unwrap();
+
+    let delete = sync_job.remove_vanished.unwrap_or(true);
+    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+
+    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
+
+        worker.log(format!("sync job '{}' start", &id));
+
+        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
+
+        worker.log(format!("sync job '{}' end", &id));
+
+        Ok(())
+    })?;
+
+    Ok(upid_str)
+}
+
+#[sortable]
+const SYNC_INFO_SUBDIRS: SubdirMap = &[
+    (
+        "run",
+        &Router::new()
+            .post(&API_METHOD_RUN_SYNC_JOB)
+    ),
+];
+
+const SYNC_INFO_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
+    .subdirs(SYNC_INFO_SUBDIRS);
+
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_SYNC_JOBS)
+    .match_all("id", &SYNC_INFO_ROUTER);
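The `next_run` computation in `list_sync_jobs` uses an immediately invoked closure returning `Option<i64>` as a try-block substitute: any failed step simply yields `None` instead of aborting the whole listing. A standalone sketch of the idiom, with a stand-in "schedule" that is just a seconds interval (hypothetical, for illustration only):

```rust
fn next_run(schedule: Option<&str>, last: i64) -> Option<i64> {
    (|| -> Option<i64> {
        let interval: i64 = schedule?.parse().ok()?; // missing or unparsable -> None
        Some(last + interval)
    })()
}

fn main() {
    assert_eq!(next_run(Some("3600"), 1_593_000_000), Some(1_593_003_600));
    assert_eq!(next_run(None, 0), None);           // no schedule configured
    assert_eq!(next_run(Some("hourly"), 0), None); // parse failure swallowed
}
```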
@@ -10,7 +10,7 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;

-use crate::tools::{self, WrappedReaderStream};
+use crate::tools;
 use crate::server::{WorkerTask, H2Service};
 use crate::backup::*;
 use crate::api2::types::*;
@@ -107,7 +107,7 @@ async move {
     }

     let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
-    if !is_new { bail!("backup directorty already exists."); }
+    if !is_new { bail!("backup directory already exists."); }

     WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
         let mut env = BackupEnvironment::new(
@@ -151,7 +151,7 @@ async move {

             match (res, env.ensure_finished()) {
                 (Ok(_), Ok(())) => {
-                    env.log("backup finished sucessfully");
+                    env.log("backup finished successfully");
                     Ok(())
                 },
                 (Err(err), Ok(())) => {
@@ -199,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "dynamic_index", &Router::new()
-            .download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
             .put(&API_METHOD_DYNAMIC_APPEND)
     ),
@@ -222,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
     ),
     (
         "fixed_index", &Router::new()
-            .download(&API_METHOD_FIXED_CHUNK_INDEX)
             .post(&API_METHOD_CREATE_FIXED_INDEX)
             .put(&API_METHOD_FIXED_APPEND)
     ),
+    (
+        "previous", &Router::new()
+            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+    ),
     (
         "speedtest", &Router::new()
             .upload(&API_METHOD_UPLOAD_SPEEDTEST)
@@ -284,6 +286,8 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
                 .minimum(1)
                 .schema()
         ),
+        ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
+            csum and reuse index for incremental backup if it matches.").schema()),
     ]),
 )
);
@@ -296,10 +300,9 @@ fn create_fixed_index(

     let env: &BackupEnvironment = rpcenv.as_ref();

-    println!("PARAM: {:?}", param);
-
     let name = tools::required_string_param(&param, "archive-name")?.to_owned();
     let size = tools::required_integer_param(&param, "size")? as usize;
+    let reuse_csum = param["reuse-csum"].as_str();

     let archive_name = name.clone();
     if !archive_name.ends_with(".fidx") {
@@ -307,12 +310,49 @@ fn create_fixed_index(
     }

     let mut path = env.backup_dir.relative_path();
-    path.push(archive_name);
+    path.push(&archive_name);

     let chunk_size = 4096*1024; // todo: ??

-    let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
-    let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;
+    // do incremental backup if csum is set
+    let mut reader = None;
+    let mut incremental = false;
+    if let Some(csum) = reuse_csum {
+        incremental = true;
+        let last_backup = match &env.last_backup {
+            Some(info) => info,
+            None => {
+                bail!("cannot reuse index - no previous backup exists");
+            }
+        };
+
+        let mut last_path = last_backup.backup_dir.relative_path();
+        last_path.push(&archive_name);
+
+        let index = match env.datastore.open_fixed_reader(last_path) {
+            Ok(index) => index,
+            Err(_) => {
+                bail!("cannot reuse index - no previous backup exists for archive");
+            }
+        };
+
+        let (old_csum, _) = index.compute_csum();
+        let old_csum = proxmox::tools::digest_to_hex(&old_csum);
+        if old_csum != csum {
+            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
+                csum, old_csum);
+        }
+
+        reader = Some(index);
+    }
+
+    let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
+
+    if let Some(reader) = reader {
+        writer.clone_data_from(&reader)?;
+    }
+
+    let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;

     env.log(format!("created new fixed index {} ({:?})", wid, path));
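The reuse check above boils down to "hex(checksum of the previous fixed index) == csum sent by the client"; only on a match is the old index cloned as the starting point for an incremental backup. A dependency-free sketch of that comparison (the real code uses `proxmox::tools::digest_to_hex`):

```rust
fn digest_to_hex(digest: &[u8; 32]) -> String {
    digest.iter().map(|b| format!("{:02x}", b)).collect()
}

fn may_reuse(last_csum: &[u8; 32], client_csum: &str) -> bool {
    digest_to_hex(last_csum) == client_csum
}

fn main() {
    let zero = [0u8; 32];
    assert!(may_reuse(&zero, &"00".repeat(32)));
    assert!(!may_reuse(&zero, "deadbeef")); // mismatch -> full backup required
}
```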
@@ -378,7 +418,7 @@ fn dynamic_append (

         env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;

-        env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
     }

     Ok(Value::Null)
@@ -443,7 +483,7 @@ fn fixed_append (

         env.fixed_writer_append_chunk(wid, offset, size, &digest)?;

-        env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
     }

     Ok(Value::Null)
@@ -498,7 +538,7 @@ fn close_dynamic_index (

     env.dynamic_writer_close(wid, chunk_count, size, csum)?;

-    env.log(format!("sucessfully closed dynamic index {}", wid));
+    env.log(format!("successfully closed dynamic index {}", wid));

     Ok(Value::Null)
 }
@@ -520,15 +560,15 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
         (
             "chunk-count",
             false,
-            &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
-                .minimum(1)
+            &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
+                .minimum(0)
                 .schema()
         ),
         (
             "size",
             false,
-            &IntegerSchema::new("File size. This is used to verify that the server got all data.")
-                .minimum(1)
+            &IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
+                .minimum(0)
                 .schema()
         ),
         ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
@@ -552,7 +592,7 @@ fn close_fixed_index (

     env.fixed_writer_close(wid, chunk_count, size, csum)?;

-    env.log(format!("sucessfully closed fixed index {}", wid));
+    env.log(format!("successfully closed fixed index {}", wid));

     Ok(Value::Null)
 }
@@ -566,26 +606,23 @@ fn finish_backup (
     let env: &BackupEnvironment = rpcenv.as_ref();

     env.finish_backup()?;
-    env.log("sucessfully finished backup");
+    env.log("successfully finished backup");

     Ok(Value::Null)
 }

 #[sortable]
-pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&dynamic_chunk_index),
+pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_previous),
     &ObjectSchema::new(
-        r###"
-Download the dynamic chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
+        "Download archive from previous backup.",
         &sorted!([
             ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
         ]),
     )
 );

-fn dynamic_chunk_index(
+fn download_previous(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -598,130 +635,38 @@ fn dynamic_chunk_index(

     async move {
         let env: &BackupEnvironment = rpcenv.as_ref();

         let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();

-        if !archive_name.ends_with(".didx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
         let last_backup = match &env.last_backup {
             Some(info) => info,
-            None => return Ok(empty_response),
+            None => bail!("no previous backup"),
         };

-        let mut path = last_backup.backup_dir.relative_path();
+        let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
         path.push(&archive_name);

-        let index = match env.datastore.open_dynamic_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
+        {
+            let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
+                ArchiveType::FixedIndex => {
+                    let index = env.datastore.open_fixed_reader(&path)?;
+                    Some(Box::new(index))
+                }
+                ArchiveType::DynamicIndex => {
+                    let index = env.datastore.open_dynamic_reader(&path)?;
+                    Some(Box::new(index))
+                }
+                _ => { None }
+            };
+            if let Some(index) = index {
+                env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+
+                for pos in 0..index.index_count() {
+                    let info = index.chunk_info(pos).unwrap();
+                    let size = info.range.end - info.range.start;
+                    env.register_chunk(info.digest, size as u32)?;
+                }
             }
         };

-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        for pos in 0..count {
-            let (start, end, digest) = index.chunk_info(pos)?;
-            let size = (end - start) as u32;
-            env.register_chunk(digest, size)?;
-        }
-
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
-    }.boxed()
-}
-
-#[sortable]
-pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
-    &ApiHandler::AsyncHttp(&fixed_chunk_index),
-    &ObjectSchema::new(
-        r###"
-Download the fixed chunk index from the previous backup.
-Simply returns an empty list if this is the first backup.
-"### ,
-        &sorted!([
-            ("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
-        ]),
-    )
-);
-
-fn fixed_chunk_index(
-    _parts: Parts,
-    _req_body: Body,
-    param: Value,
-    _info: &ApiMethod,
-    rpcenv: Box<dyn RpcEnvironment>,
-) -> ApiResponseFuture {
-
-    async move {
-        let env: &BackupEnvironment = rpcenv.as_ref();
-
-        let archive_name = tools::required_string_param(&param, "archive-name")?.to_owned();
-
-        if !archive_name.ends_with(".fidx") {
-            bail!("wrong archive extension: '{}'", archive_name);
-        }
-
-        let empty_response = {
-            Response::builder()
-                .status(StatusCode::OK)
-                .body(Body::empty())?
-        };
-
-        let last_backup = match &env.last_backup {
-            Some(info) => info,
-            None => return Ok(empty_response),
-        };
-
-        let mut path = last_backup.backup_dir.relative_path();
-        path.push(&archive_name);
-
-        let index = match env.datastore.open_fixed_reader(path) {
-            Ok(index) => index,
-            Err(_) => {
-                env.log(format!("there is no last backup for archive '{}'", archive_name));
-                return Ok(empty_response);
-            }
-        };
-
-        env.log(format!("download last backup index for archive '{}'", archive_name));
-
-        let count = index.index_count();
-        let image_size = index.index_bytes();
-        for pos in 0..count {
-            let digest = index.index_digest(pos).unwrap();
-            // Note: last chunk can be smaller
-            let start = (pos*index.chunk_size) as u64;
-            let mut end = start + index.chunk_size as u64;
-            if end > image_size { end = image_size; }
-            let size = (end - start) as u32;
-            env.register_chunk(*digest, size)?;
-        }
-
-        let reader = DigestListEncoder::new(Box::new(index));
-
-        let stream = WrappedReaderStream::new(reader);
-
-        // fixme: set size, content type?
-        let response = http::Response::builder()
-            .status(200)
-            .body(Body::wrap_stream(stream))?;
-
-        Ok(response)
+        env.log(format!("download '{}' from previous backup.", archive_name));
+        crate::api2::helpers::create_download_response(path).await
     }.boxed()
 }
@@ -47,12 +47,13 @@ struct FixedWriterState {
     chunk_count: u64,
     small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
     upload_stat: UploadStatistic,
+    incremental: bool,
 }

 struct SharedBackupState {
     finished: bool,
     uid_counter: usize,
-    file_counter: usize, // sucessfully uploaded files
+    file_counter: usize, // successfully uploaded files
     dynamic_writers: HashMap<usize, DynamicWriterState>,
     fixed_writers: HashMap<usize, FixedWriterState>,
     known_chunks: HashMap<[u8;32], u32>,
@@ -237,7 +238,7 @@ impl BackupEnvironment {
     }

     /// Store the writer with an unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
+    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();

         state.ensure_unfinished()?;
@@ -245,7 +246,7 @@ impl BackupEnvironment {
         let uid = state.next_uid();

         state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
+            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
        });

         Ok(uid)
@@ -310,7 +311,13 @@ impl BackupEnvironment {

         self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));

-        let client_side_duplicates = chunk_count - upload_stat.count;
+        // account for zero chunk, which might be uploaded but never used
+        let client_side_duplicates = if chunk_count < upload_stat.count {
+            0
+        } else {
+            chunk_count - upload_stat.count
+        };
+
         let server_side_duplicates = upload_stat.duplicates;

         if (client_side_duplicates + server_side_duplicates) > 0 {
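The guard above prevents a u64 underflow panic when the zero chunk was uploaded but never referenced by the index. `u64::saturating_sub` expresses the same clamping in one call:

```rust
// Equivalent to the if/else above: clamp at zero instead of underflowing.
fn client_side_duplicates(chunk_count: u64, uploaded: u64) -> u64 {
    chunk_count.saturating_sub(uploaded)
}

fn main() {
    assert_eq!(client_side_duplicates(10, 7), 3);
    assert_eq!(client_side_duplicates(7, 8), 0); // plain `-` would panic in debug builds
}
```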
@@ -373,21 +380,22 @@ impl BackupEnvironment {
             bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
         }

-        let expected_count = data.index.index_length();
+        if !data.incremental {
+            let expected_count = data.index.index_length();

-        if chunk_count != (expected_count as u64) {
-            bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
-        }
+            if chunk_count != (expected_count as u64) {
+                bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+            }

-        if size != (data.size as u64) {
-            bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            if size != (data.size as u64) {
+                bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+            }
         }

         let uuid = data.index.uuid;

         let expected_csum = data.index.close()?;

-        println!("server checksum {:?} client: {:?}", expected_csum, csum);
+        println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
         if csum != expected_csum {
             bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
         }
@@ -430,8 +438,6 @@ impl BackupEnvironment {

         state.ensure_unfinished()?;

-        state.finished = true;
-
         if state.dynamic_writers.len() != 0 {
             bail!("found open index writer - unable to finish backup");
         }

@@ -440,6 +446,8 @@ impl BackupEnvironment {
             bail!("backup does not contain valid files (file count == 0)");
         }

+        state.finished = true;
+
         Ok(())
     }

@@ -269,6 +269,8 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
         None => bail!("remote '{}' does not exist.", name),
     }

+    remote::save_config(&config)?;
+
     Ok(())
 }

@@ -60,7 +60,7 @@ pub fn list_sync_jobs(
         },
         schedule: {
             optional: true,
-            schema: GC_SCHEDULE_SCHEMA,
+            schema: SYNC_SCHEDULE_SCHEMA,
         },
     },
 },

@@ -154,7 +154,7 @@ pub enum DeletableProperty {
         },
         schedule: {
             optional: true,
-            schema: GC_SCHEDULE_SCHEMA,
+            schema: SYNC_SCHEDULE_SCHEMA,
         },
         delete: {
             description: "List of properties to delete.",

@@ -274,4 +274,4 @@ const ITEM_ROUTER: Router = Router::new()
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_SYNC_JOBS)
     .post(&API_METHOD_CREATE_SYNC_JOB)
-    .match_all("name", &ITEM_ROUTER);
+    .match_all("id", &ITEM_ROUTER);

src/api2/helpers.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
+use std::path::PathBuf;
+use anyhow::Error;
+use futures::*;
+use hyper::{Body, Response, StatusCode, header};
+use proxmox::http_err;
+
+pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
+    let file = tokio::fs::File::open(path.clone())
+        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
+        .await?;
+
+    let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
+        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+
+    let body = Body::wrap_stream(payload);
+
+    // fixme: set other headers ?
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .header(header::CONTENT_TYPE, "application/octet-stream")
+        .body(body)
+        .unwrap())
+}
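
The new helper deduplicates the file-streaming response code previously repeated across download endpoints (see the reader.rs and backup.rs hunks elsewhere in this diff). A hypothetical caller sketch, assuming a tokio runtime; the path is illustrative, not from the source:

    // Hypothetical caller: stream an existing file back to the client.
    async fn send_file() -> Result<hyper::Response<hyper::Body>, anyhow::Error> {
        let path = std::path::PathBuf::from("/tmp/example.bin"); // hypothetical path
        crate::api2::helpers::create_download_response(path).await
    }
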
@@ -9,9 +9,11 @@ mod syslog;
 mod journal;
 mod services;
 mod status;
-mod rrd;
+pub(crate) mod rrd;
+pub mod disks;

 pub const SUBDIRS: SubdirMap = &[
+    ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
     ("network", &network::ROUTER),

src/api2/node/disks.rs (new file, 188 lines)
@@ -0,0 +1,188 @@
+use anyhow::{bail, Error};
+use serde_json::{json, Value};
+
+use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
+use proxmox::api::router::{Router, SubdirMap};
+use proxmox::{sortable, identity};
+use proxmox::{list_subdirs_api_method};
+
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::tools::disks::{
+    DiskUsageInfo, DiskUsageType, DiskManage, SmartData,
+    get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
+};
+use crate::server::WorkerTask;
+
+use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
+
+pub mod directory;
+pub mod zfs;
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            skipsmart: {
+                description: "Skip smart checks.",
+                type: bool,
+                optional: true,
+                default: false,
+            },
+            "usage-type": {
+                type: DiskUsageType,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        description: "Local disk list.",
+        type: Array,
+        items: {
+            type: DiskUsageInfo,
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// List local disks
+pub fn list_disks(
+    skipsmart: bool,
+    usage_type: Option<DiskUsageType>,
+) -> Result<Vec<DiskUsageInfo>, Error> {
+
+    let mut list = Vec::new();
+
+    for (_, info) in get_disks(None, skipsmart)? {
+        if let Some(ref usage_type) = usage_type {
+            if info.used == *usage_type {
+                list.push(info);
+            }
+        } else {
+            list.push(info);
+        }
+    }
+
+    Ok(list)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            disk: {
+                schema: BLOCKDEVICE_NAME_SCHEMA,
+            },
+            healthonly: {
+                description: "If true returns only the health status.",
+                type: bool,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        type: SmartData,
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Get SMART attributes and health of a disk.
+pub fn smart_status(
+    disk: String,
+    healthonly: Option<bool>,
+) -> Result<SmartData, Error> {
+
+    let healthonly = healthonly.unwrap_or(false);
+
+    let manager = DiskManage::new();
+    let disk = manager.disk_by_name(&disk)?;
+    get_smart_data(&disk, healthonly)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            disk: {
+                schema: BLOCKDEVICE_NAME_SCHEMA,
+            },
+            uuid: {
+                description: "UUID for the GPT table.",
+                type: String,
+                optional: true,
+                max_length: 36,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Initialize empty Disk with GPT
+pub fn initialize_disk(
+    disk: String,
+    uuid: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let username = rpcenv.get_user().unwrap();
+
+    let info = get_disk_usage_info(&disk, true)?;
+
+    if info.used != DiskUsageType::Unused {
+        bail!("disk '{}' is already in use.", disk);
+    }
+
+    let upid_str = WorkerTask::new_thread(
+        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
+        {
+            worker.log(format!("initialize disk {}", disk));
+
+            let disk_manager = DiskManage::new();
+            let disk_info = disk_manager.disk_by_name(&disk)?;
+
+            inititialize_gpt_disk(&disk_info, uuid.as_deref())?;
+
+            Ok(())
+        })?;
+
+    Ok(json!(upid_str))
+}
+
+#[sortable]
+const SUBDIRS: SubdirMap = &sorted!([
+    // ("lvm", &lvm::ROUTER),
+    ("directory", &directory::ROUTER),
+    ("zfs", &zfs::ROUTER),
+    (
+        "initgpt", &Router::new()
+            .post(&API_METHOD_INITIALIZE_DISK)
+    ),
+    (
+        "list", &Router::new()
+            .get(&API_METHOD_LIST_DISKS)
+    ),
+    (
+        "smart", &Router::new()
+            .get(&API_METHOD_SMART_STATUS)
+    ),
+]);
+
+pub const ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SUBDIRS))
+    .subdirs(SUBDIRS);
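
The `#[sortable]` / `sorted!` pair keeps the SUBDIRS table ordered by name so lookups can binary-search it. A self-contained sketch of that dispatch idea in plain Rust (this is *not* the proxmox Router API; names and handlers are stand-ins):

    // Stand-in for the sorted-subdir dispatch: binary-search a sorted table.
    type Handler = fn() -> &'static str;

    fn initgpt() -> &'static str { "POST initialize disk" }
    fn list() -> &'static str { "GET disk list" }
    fn smart() -> &'static str { "GET smart status" }

    // Must stay sorted by name, which is what #[sortable] enforces above.
    const SUBDIRS: &[(&str, Handler)] = &[
        ("initgpt", initgpt),
        ("list", list),
        ("smart", smart),
    ];

    fn dispatch(subdir: &str) -> Option<&'static str> {
        SUBDIRS
            .binary_search_by_key(&subdir, |(name, _)| *name)
            .ok()
            .map(|idx| (SUBDIRS[idx].1)())
    }

    fn main() {
        assert_eq!(dispatch("list"), Some("GET disk list"));
        assert_eq!(dispatch("lvm"), None); // commented out in the table above
    }
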
src/api2/node/disks/directory.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
+use anyhow::{bail, Error};
+use serde_json::json;
+use ::serde::{Deserialize, Serialize};
+
+use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
+use proxmox::api::section_config::SectionConfigData;
+use proxmox::api::router::Router;
+
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::tools::disks::{
+    DiskManage, FileSystemType, DiskUsageType,
+    create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
+};
+use crate::tools::systemd::{self, types::*};
+
+use crate::server::WorkerTask;
+
+use crate::api2::types::*;
+
+#[api(
+    properties: {
+        "filesystem": {
+            type: FileSystemType,
+            optional: true,
+        },
+    },
+)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Datastore mount info.
+pub struct DatastoreMountInfo {
+    /// The path of the mount unit.
+    pub unitfile: String,
+    /// The mount path.
+    pub path: String,
+    /// The mounted device.
+    pub device: String,
+    /// File system type
+    pub filesystem: Option<String>,
+    /// Mount options
+    pub options: Option<String>,
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        }
+    },
+    returns: {
+        description: "List of systemd datastore mount units.",
+        type: Array,
+        items: {
+            type: DatastoreMountInfo,
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// List systemd datastore mount units.
+pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
+
+    lazy_static::lazy_static! {
+        static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
+    }
+
+    let mut list = Vec::new();
+
+    let basedir = "/etc/systemd/system";
+    for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
+        let item = item?;
+        let name = item.file_name().to_string_lossy().to_string();
+
+        let unitfile = format!("{}/{}", basedir, name);
+        let config = systemd::config::parse_systemd_mount(&unitfile)?;
+        let data: SystemdMountSection = config.lookup("Mount", "Mount")?;
+
+        list.push(DatastoreMountInfo {
+            unitfile,
+            device: data.What,
+            path: data.Where,
+            filesystem: data.Type,
+            options: data.Options,
+        });
+    }
+
+    Ok(list)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            name: {
+                schema: DATASTORE_SCHEMA,
+            },
+            disk: {
+                schema: BLOCKDEVICE_NAME_SCHEMA,
+            },
+            "add-datastore": {
+                description: "Configure a datastore using the directory.",
+                type: bool,
+                optional: true,
+            },
+            filesystem: {
+                type: FileSystemType,
+                optional: true,
+            },
+        }
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
+pub fn create_datastore_disk(
+    name: String,
+    disk: String,
+    add_datastore: Option<bool>,
+    filesystem: Option<FileSystemType>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let username = rpcenv.get_user().unwrap();
+
+    let info = get_disk_usage_info(&disk, true)?;
+
+    if info.used != DiskUsageType::Unused {
+        bail!("disk '{}' is already in use.", disk);
+    }
+
+    let upid_str = WorkerTask::new_thread(
+        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        {
+            worker.log(format!("create datastore '{}' on disk {}", name, disk));
+
+            let add_datastore = add_datastore.unwrap_or(false);
+            let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);
+
+            let manager = DiskManage::new();
+
+            let disk = manager.clone().disk_by_name(&disk)?;
+
+            let partition = create_single_linux_partition(&disk)?;
+            create_file_system(&partition, filesystem)?;
+
+            let uuid = get_fs_uuid(&partition)?;
+            let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
+
+            let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;
+
+            systemd::reload_daemon()?;
+            systemd::enable_unit(&mount_unit_name)?;
+            systemd::start_unit(&mount_unit_name)?;
+
+            if add_datastore {
+                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+            }
+
+            Ok(())
+        })?;
+
+    Ok(upid_str)
+}
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
+    .post(&API_METHOD_CREATE_DATASTORE_DISK);
+
+
+fn create_datastore_mount_unit(
+    datastore_name: &str,
+    fs_type: FileSystemType,
+    what: &str,
+) -> Result<(String, String), Error> {
+
+    let mount_point = format!("/mnt/datastore/{}", datastore_name);
+    let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
+    mount_unit_name.push_str(".mount");
+
+    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
+
+    let unit = SystemdUnitSection {
+        Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
+        ..Default::default()
+    };
+
+    let install = SystemdInstallSection {
+        WantedBy: Some(vec!["multi-user.target".to_string()]),
+        ..Default::default()
+    };
+
+    let mount = SystemdMountSection {
+        What: what.to_string(),
+        Where: mount_point.clone(),
+        Type: Some(fs_type.to_string()),
+        Options: Some(String::from("defaults")),
+        ..Default::default()
+    };
+
+    let mut config = SectionConfigData::new();
+    config.set_data("Unit", "Unit", unit)?;
+    config.set_data("Install", "Install", install)?;
+    config.set_data("Mount", "Mount", mount)?;
+
+    systemd::config::save_systemd_mount(&mount_unit_path, &config)?;
+
+    Ok((mount_unit_name, mount_point))
+}
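
For a datastore named, say, "store1", create_datastore_mount_unit escapes "/mnt/datastore/store1" into the unit name "mnt-datastore-store1.mount" and writes roughly the unit below (a sketch: the UUID is made up, section order may differ, and the "datatstore" spelling comes verbatim from the format string above):

    // Sketch: the expected unit file content for a hypothetical "store1".
    fn main() {
        let expected_unit = "\
    [Unit]
    Description=Mount datatstore 'store1' under '/mnt/datastore/store1'

    [Install]
    WantedBy=multi-user.target

    [Mount]
    What=/dev/disk/by-uuid/0000-0000
    Where=/mnt/datastore/store1
    Type=ext4
    Options=defaults
    ";
        println!("{}", expected_unit);
    }
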
src/api2/node/disks/zfs.rs (new file, 380 lines)
@@ -0,0 +1,380 @@
+use anyhow::{bail, Error};
+use serde_json::{json, Value};
+use ::serde::{Deserialize, Serialize};
+
+use proxmox::api::{
+    api, Permission, RpcEnvironment, RpcEnvironmentType,
+    schema::{
+        Schema,
+        StringSchema,
+        ArraySchema,
+        IntegerSchema,
+        ApiStringFormat,
+        parse_property_string,
+    },
+};
+use proxmox::api::router::Router;
+
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::tools::disks::{
+    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
+    DiskUsageType,
+};
+
+use crate::server::WorkerTask;
+
+use crate::api2::types::*;
+
+pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
+    .schema();
+
+pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of disk names, comma separated.")
+    .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
+    .schema();
+
+pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
+    "Pool sector size exponent.")
+    .minimum(9)
+    .maximum(16)
+    .default(12)
+    .schema();
+
+
+#[api(
+    default: "On",
+)]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// The ZFS compression algorithm to use.
+pub enum ZfsCompressionType {
+    /// Gnu Zip
+    Gzip,
+    /// LZ4
+    Lz4,
+    /// LZJB
+    Lzjb,
+    /// ZLE
+    Zle,
+    /// Enable compression using the default algorithm.
+    On,
+    /// Disable compression.
+    Off,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// The ZFS RAID level to use.
+pub enum ZfsRaidLevel {
+    /// Single Disk
+    Single,
+    /// Mirror
+    Mirror,
+    /// Raid10
+    Raid10,
+    /// RaidZ
+    RaidZ,
+    /// RaidZ2
+    RaidZ2,
+    /// RaidZ3
+    RaidZ3,
+}
+
+
+#[api()]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// zpool list item
+pub struct ZpoolListItem {
+    /// zpool name
+    pub name: String,
+    /// Health
+    pub health: String,
+    /// Total size
+    pub size: u64,
+    /// Used size
+    pub alloc: u64,
+    /// Free space
+    pub free: u64,
+    /// ZFS fragnentation level
+    pub frag: u64,
+    /// ZFS deduplication ratio
+    pub dedup: f64,
+}
+
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "List of zpools.",
+        type: Array,
+        items: {
+            type: ZpoolListItem,
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// List zfs pools.
+pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
+
+    let data = zpool_list(None, false)?;
+
+    let mut list = Vec::new();
+
+    for item in data {
+        if let Some(usage) = item.usage {
+            list.push(ZpoolListItem {
+                name: item.name,
+                health: item.health,
+                size: usage.size,
+                alloc: usage.alloc,
+                free: usage.free,
+                frag: usage.frag,
+                dedup: usage.dedup,
+            });
+        }
+    }
+
+    Ok(list)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            name: {
+                schema: DATASTORE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "zpool vdev tree with status",
+        properties: {
+
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Get zpool status details.
+pub fn zpool_details(
+    name: String,
+) -> Result<Value, Error> {
+
+    let key_value_list = zpool_status(&name)?;
+
+    let config = match key_value_list.iter().find(|(k, _)| k == "config") {
+        Some((_, v)) => v,
+        None => bail!("got zpool status without config key"),
+    };
+
+    let vdev_list = parse_zpool_status_config_tree(config)?;
+    let mut tree = vdev_list_to_tree(&vdev_list)?;
+
+    for (k, v) in key_value_list {
+        if k != "config" {
+            tree[k] = v.into();
+        }
+    }
+
+    tree["name"] = tree.as_object_mut().unwrap()
+        .remove("pool")
+        .unwrap_or_else(|| name.into());
+
+
+    Ok(tree)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            name: {
+                schema: DATASTORE_SCHEMA,
+            },
+            devices: {
+                schema: DISK_LIST_SCHEMA,
+            },
+            raidlevel: {
+                type: ZfsRaidLevel,
+            },
+            ashift: {
+                schema: ZFS_ASHIFT_SCHEMA,
+                optional: true,
+            },
+            compression: {
+                type: ZfsCompressionType,
+                optional: true,
+            },
+            "add-datastore": {
+                description: "Configure a datastore using the zpool.",
+                type: bool,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Create a new ZFS pool.
+pub fn create_zpool(
+    name: String,
+    devices: String,
+    raidlevel: ZfsRaidLevel,
+    compression: Option<String>,
+    ashift: Option<usize>,
+    add_datastore: Option<bool>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let username = rpcenv.get_user().unwrap();
+
+    let add_datastore = add_datastore.unwrap_or(false);
+
+    let ashift = ashift.unwrap_or(12);
+
+    let devices_text = devices.clone();
+    let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
+    let devices: Vec<String> = devices.as_array().unwrap().iter()
+        .map(|v| v.as_str().unwrap().to_string()).collect();
+
+    let disk_map = crate::tools::disks::get_disks(None, true)?;
+    for disk in devices.iter() {
+        match disk_map.get(disk) {
+            Some(info) => {
+                if info.used != DiskUsageType::Unused {
+                    bail!("disk '{}' is already in use.", disk);
+                }
+            }
+            None => {
+                bail!("no such disk '{}'", disk);
+            }
+        }
+    }
+
+    let min_disks = match raidlevel {
+        ZfsRaidLevel::Single => 1,
+        ZfsRaidLevel::Mirror => 2,
+        ZfsRaidLevel::Raid10 => 4,
+        ZfsRaidLevel::RaidZ => 3,
+        ZfsRaidLevel::RaidZ2 => 4,
+        ZfsRaidLevel::RaidZ3 => 5,
+    };
+
+    // Sanity checks
+    if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
+        bail!("Raid10 needs an even number of disks.");
+    }
+
+    if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
+        bail!("Please give only one disk for single disk mode.");
+    }
+
+    if devices.len() < min_disks {
+        bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
+    }
+
+    // check if the default path does exist already and bail if it does
+    // otherwise we get an error on mounting
+    let mut default_path = std::path::PathBuf::from("/");
+    default_path.push(&name);
+
+    match std::fs::metadata(&default_path) {
+        Err(_) => {}, // path does not exist
+        Ok(_) => {
+            bail!("path {:?} already exists", default_path);
+        }
+    }
+
+    let upid_str = WorkerTask::new_thread(
+        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        {
+            worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
+
+
+            let mut command = std::process::Command::new("zpool");
+            command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);
+
+            match raidlevel {
+                ZfsRaidLevel::Single => {
+                    command.arg(&devices[0]);
+                }
+                ZfsRaidLevel::Mirror => {
+                    command.arg("mirror");
+                    command.args(devices);
+                }
+                ZfsRaidLevel::Raid10 => {
+                    devices.chunks(2).for_each(|pair| {
+                        command.arg("mirror");
+                        command.args(pair);
+                    });
+                }
+                ZfsRaidLevel::RaidZ => {
+                    command.arg("raidz");
+                    command.args(devices);
+                }
+                ZfsRaidLevel::RaidZ2 => {
+                    command.arg("raidz2");
+                    command.args(devices);
+                }
+                ZfsRaidLevel::RaidZ3 => {
+                    command.arg("raidz3");
+                    command.args(devices);
+                }
+            }
+
+            worker.log(format!("# {:?}", command));
+
+            let output = crate::tools::run_command(command, None)?;
+            worker.log(output);
+
+            if let Some(compression) = compression {
+                let mut command = std::process::Command::new("zfs");
+                command.args(&["set", &format!("compression={}", compression), &name]);
+                worker.log(format!("# {:?}", command));
+                let output = crate::tools::run_command(command, None)?;
+                worker.log(output);
+            }
+
+            if add_datastore {
+                let mount_point = format!("/{}", name);
+                crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+            }
+
+            Ok(())
+        })?;
+
+    Ok(upid_str)
+}
+
+pub const POOL_ROUTER: Router = Router::new()
+    .get(&API_METHOD_ZPOOL_DETAILS);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_ZPOOLS)
+    .post(&API_METHOD_CREATE_ZPOOL)
+    .match_all("name", &POOL_ROUTER);
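
Worked example of the Raid10 branch above: `devices.chunks(2)` emits a "mirror" keyword before each disk pair, so for a hypothetical pool "tank" on four disks the worker would log something like `zpool create -o ashift=12 tank mirror sda sdb mirror sdc sdd`. A self-contained check of that argument assembly (device and pool names are made up):

    fn main() {
        let devices = vec!["sda", "sdb", "sdc", "sdd"]; // hypothetical names
        let mut args: Vec<String> =
            vec!["create".into(), "-o".into(), "ashift=12".into(), "tank".into()];
        // Same pairing logic as the Raid10 arm of create_zpool:
        devices.chunks(2).for_each(|pair| {
            args.push("mirror".into());
            args.extend(pair.iter().map(|d| d.to_string()));
        });
        assert_eq!(args.join(" "), "create -o ashift=12 tank mirror sda sdb mirror sdc sdd");
    }
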
@@ -94,7 +94,7 @@ fn get_journal(

     let mut lines: Vec<String> = vec![];

-    let mut child = Command::new("/usr/bin/mini-journalreader")
+    let mut child = Command::new("mini-journalreader")
         .args(&args)
         .stdout(Stdio::piped())
         .spawn()?;

@@ -338,7 +338,7 @@ pub enum DeletableProperty {
     autostart,
     /// Delete bridge ports (set to 'none')
     bridge_ports,
-    /// Delet bridge-vlan-aware flag
+    /// Delete bridge-vlan-aware flag
     bridge_vlan_aware,
     /// Delete bond-slaves (set to 'none')
     slaves,

@@ -1,9 +1,47 @@
 use anyhow::Error;
-use serde_json::Value;
+use serde_json::{Value, json};

 use proxmox::api::{api, Router};

 use crate::api2::types::*;
+use crate::tools::epoch_now_f64;
+use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
+
+pub fn create_value_from_rrd(
+    basedir: &str,
+    list: &[&str],
+    timeframe: RRDTimeFrameResolution,
+    cf: RRDMode,
+) -> Result<Value, Error> {
+
+    let mut result = Vec::new();
+    let now = epoch_now_f64()?;
+
+    for name in list {
+        let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
+            Some(result) => result,
+            None => continue,
+        };
+
+        let mut t = start;
+        for index in 0..RRD_DATA_ENTRIES {
+            if result.len() <= index {
+                if let Some(value) = list[index] {
+                    result.push(json!({ "time": t, *name: value }));
+                } else {
+                    result.push(json!({ "time": t }));
+                }
+            } else {
+                if let Some(value) = list[index] {
+                    result[index][name] = value.into();
+                }
+            }
+            t += reso;
+        }
+    }
+
+    Ok(result.into())
+}

 #[api(
     input: {
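
The new create_value_from_rrd helper merges several RRD series into a single time-indexed JSON array: one object per RRD slot, keyed by "time", with one field per requested series that had data at that slot. A minimal sketch of the resulting shape using serde_json (all values hypothetical):

    use serde_json::json;

    fn main() {
        // Shape produced by merging e.g. the "cpu" and "iowait" series:
        let merged = json!([
            { "time": 1000, "cpu": 0.10, "iowait": 0.01 },
            { "time": 1060, "cpu": 0.12 },  // "iowait" slot was None
            { "time": 1120 }                // no series had data here
        ]);
        println!("{}", serde_json::to_string_pretty(&merged).unwrap());
    }
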
@@ -27,7 +65,7 @@ fn get_node_stats(
     _param: Value,
 ) -> Result<Value, Error> {

-    crate::rrd::extract_data(
+    create_value_from_rrd(
         "host",
         &[
             "cpu", "iowait",

@@ -36,8 +74,9 @@ fn get_node_stats(
             "netin", "netout",
             "loadavg",
             "total", "used",
-            "read_ios", "read_bytes", "read_ticks",
-            "write_ios", "write_bytes", "write_ticks",
+            "read_ios", "read_bytes",
+            "write_ios", "write_bytes",
+            "io_ticks",
         ],
         timeframe,
         cf,

@@ -38,7 +38,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let mut child = Command::new("/bin/systemctl")
+    let mut child = Command::new("systemctl")
         .args(&["show", real_service_name])
         .stdout(Stdio::piped())
         .spawn()?;

@@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {

     let real_service_name = real_service_name(service);

-    let status = Command::new("/bin/systemctl")
+    let status = Command::new("systemctl")
         .args(&[cmd, real_service_name])
         .status()?;

@@ -256,7 +256,7 @@ fn stop_service(
     _param: Value,
 ) -> Result<Value, Error> {

-    log::info!("stoping service {}", service);
+    log::info!("stopping service {}", service);

     run_service_command(&service, "stop")
 }

@@ -1,4 +1,5 @@
 use std::process::Command;
+use std::path::Path;

 use anyhow::{Error, format_err, bail};
 use serde_json::{json, Value};

@@ -60,6 +61,7 @@ fn get_usage(

     let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
+    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

     Ok(json!({
         "memory": {

@@ -68,6 +70,11 @@ fn get_usage(
             "free": meminfo.memfree,
         },
         "cpu": kstat.cpu,
+        "root": {
+            "total": disk_usage.total,
+            "used": disk_usage.used,
+            "free": disk_usage.avail,
+        }
     }))
 }

@@ -95,7 +102,7 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
         NodePowerCommand::Shutdown => "poweroff",
     };

-    let output = Command::new("/bin/systemctl")
+    let output = Command::new("systemctl")
         .arg(systemctl_command)
         .output()
         .map_err(|err| format_err!("failed to execute systemctl - {}", err))?;

@@ -27,7 +27,7 @@ fn dump_journal(
     let start = start.unwrap_or(0);
     let mut count: u64 = 0;

-    let mut child = Command::new("/bin/journalctl")
+    let mut child = Command::new("journalctl")
         .args(&args)
         .stdout(Stdio::piped())
         .spawn()?;

@@ -323,21 +323,9 @@ pub fn list_tasks(

     let mut count = 0;

-    for info in list.iter() {
+    for info in list {
         if !list_all && info.upid.username != username { continue; }

-        let mut entry = TaskListItem {
-            upid: info.upid_str.clone(),
-            node: "localhost".to_string(),
-            pid: info.upid.pid as i64,
-            pstart: info.upid.pstart,
-            starttime: info.upid.starttime,
-            worker_type: info.upid.worker_type.clone(),
-            worker_id: info.upid.worker_id.clone(),
-            user: info.upid.username.clone(),
-            endtime: None,
-            status: None,
-        };
-
         if let Some(username) = userfilter {
             if !info.upid.username.contains(username) { continue; }

@@ -367,9 +355,6 @@ pub fn list_tasks(
         if errors && state.1 == "OK" {
             continue;
         }
-
-        entry.endtime = Some(state.0);
-        entry.status = Some(state.1.clone());
     }

     if (count as u64) < start {

@@ -379,7 +364,7 @@ pub fn list_tasks(
         count += 1;
     }

-    if (result.len() as u64) < limit { result.push(entry); };
+    if (result.len() as u64) < limit { result.push(info.into()); };
 }

 rpcenv["total"] = Value::from(count);

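
This refactor drops the manual field-by-field TaskListItem construction in favor of `info.into()`, relying on the From<TaskListInfo> impl added to src/api2/types.rs later in this diff, so conversion only happens for entries that survive filtering. A minimal self-contained sketch of the pattern (types are illustrative, not the crate's):

    struct Info { upid: String, pid: i64 }
    struct Item { upid: String, node: String, pid: i64 }

    // Centralize the construction in a From impl so call sites can use .into().
    impl From<Info> for Item {
        fn from(info: Info) -> Self {
            Item { upid: info.upid, node: "localhost".to_string(), pid: info.pid }
        }
    }

    fn main() {
        let info = Info { upid: "UPID:demo".to_string(), pid: 42 };
        let item: Item = info.into(); // what `result.push(info.into())` relies on
        assert_eq!(item.node, "localhost");
        assert_eq!(item.pid, 42);
        assert_eq!(item.upid, "UPID:demo");
    }
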
@@ -1,4 +1,5 @@
+//! Sync datastore from remote server
 use std::sync::{Arc};

 use anyhow::{format_err, Error};

@@ -15,6 +16,52 @@ use crate::config::{
     cached_user_info::CachedUserInfo,
 };

+
+pub fn check_pull_privs(
+    username: &str,
+    store: &str,
+    remote: &str,
+    remote_store: &str,
+    delete: bool,
+) -> Result<(), Error> {
+
+    let user_info = CachedUserInfo::new()?;
+
+    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+
+    if delete {
+        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+    }
+
+    Ok(())
+}
+
+pub async fn get_pull_parameters(
+    store: &str,
+    remote: &str,
+    remote_store: &str,
+) -> Result<(HttpClient, BackupRepository, Arc<DataStore>), Error> {
+
+    let tgt_store = DataStore::lookup_datastore(store)?;
+
+    let (remote_config, _digest) = remote::config()?;
+    let remote: remote::Remote = remote_config.lookup("remote", remote)?;
+
+    let options = HttpClientOptions::new()
+        .password(Some(remote.password.clone()))
+        .fingerprint(remote.fingerprint.clone());
+
+    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
+    let _auth_info = client.login() // make sure we can auth
+        .await
+        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
+
+    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store.to_string());
+
+    Ok((client, src_repo, tgt_store))
+}
+
 #[api(
     input: {
         properties: {

@@ -52,33 +99,12 @@ async fn pull (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let user_info = CachedUserInfo::new()?;
-
     let username = rpcenv.get_user().unwrap();
-    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
-    user_info.check_privs(&username, &["remote", &remote, &remote_store], PRIV_REMOTE_READ, false)?;
-
     let delete = remove_vanished.unwrap_or(true);

-    if delete {
-        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_PRUNE, false)?;
-    }
+    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;

-    let tgt_store = DataStore::lookup_datastore(&store)?;
-
-    let (remote_config, _digest) = remote::config()?;
-    let remote: remote::Remote = remote_config.lookup("remote", &remote)?;
-
-    let options = HttpClientOptions::new()
-        .password(Some(remote.password.clone()))
-        .fingerprint(remote.fingerprint.clone());
-
-    let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-    let _auth_info = client.login() // make sure we can auth
-        .await
-        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
-
-    let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), remote_store);
+    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

     // fixme: set to_stdout to false?
     let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {

@@ -17,6 +17,7 @@ use crate::server::{WorkerTask, H2Service};
 use crate::tools;
 use crate::config::acl::PRIV_DATASTORE_READ;
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::api2::helpers;

 mod environment;
 use environment::*;

@@ -131,7 +132,7 @@ fn upgrade_to_backup_reader_protocol(
             Either::Right((Ok(res), _)) => Ok(res),
             Either::Right((Err(err), _)) => Err(err),
         })
-        .map_ok(move |_| env.log("reader finished sucessfully"))
+        .map_ok(move |_| env.log("reader finished successfully"))
     })?;

     let response = Response::builder()

@@ -187,26 +188,9 @@ fn download_file(
         path.push(env.backup_dir.relative_path());
         path.push(&file_name);

-        let path2 = path.clone();
-        let path3 = path.clone();
+        env.log(format!("download {:?}", path.clone()));

-        let file = tokio::fs::File::open(path)
-            .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
-            .await?;
-
-        env.log(format!("download {:?}", path3));
-
-        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
-
-        let body = Body::wrap_stream(payload);
-
-        // fixme: set other headers ?
-        Ok(Response::builder()
-            .status(StatusCode::OK)
-            .header(header::CONTENT_TYPE, "application/octet-stream")
-            .body(body)
-            .unwrap())
+        helpers::create_download_response(path).await
     }.boxed()
 }

src/api2/status.rs (new file, 226 lines)
@@ -0,0 +1,226 @@
+use proxmox::list_subdirs_api_method;
+
+use anyhow::{Error};
+use serde_json::{json, Value};
+
+use proxmox::api::{
+    api,
+    ApiMethod,
+    Permission,
+    Router,
+    RpcEnvironment,
+    SubdirMap,
+    UserInformation,
+};
+
+use crate::api2::types::{
+    DATASTORE_SCHEMA,
+    RRDMode,
+    RRDTimeFrameResolution,
+    TaskListItem
+};
+
+use crate::server;
+use crate::backup::{DataStore};
+use crate::config::datastore;
+use crate::tools::epoch_now_f64;
+use crate::tools::statistics::{linear_regression};
+use crate::config::cached_user_info::CachedUserInfo;
+use crate::config::acl::{
+    PRIV_SYS_AUDIT,
+    PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_BACKUP,
+};
+
+#[api(
+    returns: {
+        description: "Lists the Status of the Datastores.",
+        type: Array,
+        items: {
+            description: "Status of a Datastore",
+            type: Object,
+            properties: {
+                store: {
+                    schema: DATASTORE_SCHEMA,
+                },
+                total: {
+                    type: Integer,
+                    description: "The Size of the underlying storage in bytes",
+                },
+                used: {
+                    type: Integer,
+                    description: "The used bytes of the underlying storage",
+                },
+                avail: {
+                    type: Integer,
+                    description: "The available bytes of the underlying storage",
+                },
+                history: {
+                    type: Array,
+                    description: "A list of usages of the past (last Month).",
+                    items: {
+                        type: Number,
+                        description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
+                    }
+                },
+                "estimated-full-date": {
+                    type: Integer,
+                    optional: true,
+                    description: "Estimation of the UNIX epoch when the storage will be full.\
+                        This is calculated via a simple Linear Regression (Least Squares)\
+                        of RRD data of the last Month. Missing if there are not enough data points yet.\
+                        If the estimate lies in the past, the usage is decreasing.",
+                },
+            },
+        },
+    },
+)]
+/// List Datastore usages and estimates
+fn datastore_status(
+    _param: Value,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let (config, _digest) = datastore::config()?;
+
+    let username = rpcenv.get_user().unwrap();
+    let user_info = CachedUserInfo::new()?;
+
+    let mut list = Vec::new();
+
+    for (store, (_, _)) in &config.sections {
+        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+        if !allowed {
+            continue;
+        }
+
+        let datastore = DataStore::lookup_datastore(&store)?;
+        let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
+
+        let mut entry = json!({
+            "store": store,
+            "total": status.total,
+            "used": status.used,
+            "avail": status.avail,
+        });
+
+        let rrd_dir = format!("datastore/{}", store);
+        let now = epoch_now_f64()?;
+        let rrd_resolution = RRDTimeFrameResolution::Month;
+        let rrd_mode = RRDMode::Average;
+
+        let total_res = crate::rrd::extract_cached_data(
+            &rrd_dir,
+            "total",
+            now,
+            rrd_resolution,
+            rrd_mode,
+        );
+
+        let used_res = crate::rrd::extract_cached_data(
+            &rrd_dir,
+            "used",
+            now,
+            rrd_resolution,
+            rrd_mode,
+        );
+
+        match (total_res, used_res) {
+            (Some((start, reso, total_list)), Some((_, _, used_list))) => {
+                let mut usage_list: Vec<f64> = Vec::new();
+                let mut time_list: Vec<u64> = Vec::new();
+                let mut history = Vec::new();
+
+                for (idx, used) in used_list.iter().enumerate() {
+                    let total = if idx < total_list.len() {
+                        total_list[idx]
+                    } else {
+                        None
+                    };
+
+                    match (total, used) {
+                        (Some(total), Some(used)) if total != 0.0 => {
+                            time_list.push(start + (idx as u64)*reso);
+                            let usage = used/total;
+                            usage_list.push(usage);
+                            history.push(json!(usage));
+                        },
+                        _ => {
+                            history.push(json!(null))
+                        }
+                    }
+                }
+
+                entry["history"] = history.into();
+
+                // we skip the calculation for datastores with not enough data
+                if usage_list.len() >= 7 {
+                    if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
+                        if b != 0.0 {
+                            let estimate = (1.0 - a) / b;
+                            entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
+                        }
+                    }
+                }
+            },
+            _ => {},
+        }
+
+        list.push(entry);
+    }
+
+    Ok(list.into())
+}
+
+#[api(
+    input: {
+        properties: {
+            since: {
+                type: u64,
+                description: "Only list tasks since this UNIX epoch.",
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        description: "A list of tasks.",
+        type: Array,
+        items: { type: TaskListItem },
+    },
+    access: {
+        description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
+        permission: &Permission::Anybody,
+    },
+)]
+/// List tasks.
+pub fn list_tasks(
+    _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<TaskListItem>, Error> {
+
+    let username = rpcenv.get_user().unwrap();
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+
+    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
+
+    // TODO: replace with call that gets all task since 'since' epoch
+    let list: Vec<TaskListItem> = server::read_task_list()?
+        .into_iter()
+        .map(TaskListItem::from)
+        .filter(|entry| list_all || entry.user == username)
+        .collect();
+
+    Ok(list.into())
+}
+
+const SUBDIRS: SubdirMap = &[
+    ("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
+    ("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
+];
+
+pub const ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SUBDIRS))
+    .subdirs(SUBDIRS);
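
datastore_status fits usage = a + b·t by least squares over the last month of RRD samples and solves a + b·t = 1 for t, which is exactly the `estimate = (1.0 - a) / b` line above. A self-contained numeric check (this regression is our own sketch, not the crate's tools::statistics implementation, and the sample data is made up):

    // Fit usage = a + b*t; the storage is full when a + b*t = 1.
    fn linear_regression(x: &[u64], y: &[f64]) -> Option<(f64, f64)> {
        if x.len() != y.len() || x.is_empty() { return None; }
        let n = x.len() as f64;
        let mean_x = x.iter().map(|v| *v as f64).sum::<f64>() / n;
        let mean_y = y.iter().sum::<f64>() / n;
        let mut num = 0.0;
        let mut den = 0.0;
        for (xi, yi) in x.iter().zip(y) {
            let dx = *xi as f64 - mean_x;
            num += dx * (yi - mean_y);
            den += dx * dx;
        }
        if den == 0.0 { return None; }
        let b = num / den;           // slope: usage growth per second
        let a = mean_y - b * mean_x; // intercept
        Some((a, b))
    }

    fn main() {
        // Usage grows 1% per day starting at 50%: full after 50 more days.
        let t: Vec<u64> = (0..8).map(|d| d * 86400).collect();
        let u: Vec<f64> = (0..8).map(|d| 0.5 + 0.01 * d as f64).collect();
        let (a, b) = linear_regression(&t, &u).unwrap();
        let estimate = (1.0 - a) / b;
        assert!((estimate - 50.0 * 86400.0).abs() < 1.0);
    }
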
@@ -27,6 +27,8 @@ macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL
 macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
 macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }

+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
+
 #[macro_export]
 macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

@@ -63,13 +65,17 @@ const_regex!{

     pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

-    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
+
+    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

     pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");

     pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

+    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
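
The new BLOCKDEVICE_NAME_REGEX accepts kernel block device names such as sda, hdb, xvdc, vdz, and nvme0n1. Note that `(:?` reads like a typo for the non-capturing group `(?:`; the pattern still matches the intended names, since `:?` merely permits an optional literal colon. A quick check with the regex crate (assumes `regex = "1"` in Cargo.toml):

    fn main() {
        let re = regex::Regex::new(r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$").unwrap();
        for name in &["sda", "hdb", "xvdc", "vdz", "nvme0n1"] {
            assert!(re.is_match(name), "{} should match", name);
        }
        assert!(!re.is_match("dm-0")); // device-mapper nodes are not allowed
    }
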
@@ -129,6 +135,8 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
 pub const CIDR_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&CIDR_REGEX);

+pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)

@@ -287,6 +295,11 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();

+pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run sync job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .schema();
+
 pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run garbage collection job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))

@@ -344,6 +357,11 @@ pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
     .max_length(64)
     .schema();

+pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
+    .format(&BLOCKDEVICE_NAME_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();
+
 // Complex type definitions

@@ -379,6 +397,9 @@ pub struct GroupListItem {
     pub backup_count: u64,
     /// List of contained archive files.
     pub files: Vec<String>,
+    /// The owner of group
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub owner: Option<String>,
 }

 #[api(
@@ -407,10 +428,13 @@ pub struct SnapshotListItem {
     pub backup_id: String,
     pub backup_time: i64,
     /// List of contained archive files.
-    pub files: Vec<String>,
+    pub files: Vec<BackupContent>,
+    /// Overall snapshot size (sum of all archive sizes).
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub size: Option<u64>,
     /// The owner of the snapshots group
     #[serde(skip_serializing_if="Option::is_none")]
     pub owner: Option<String>,
 }

 #[api(

@@ -479,6 +503,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
 /// Basic information about archive files inside a backup snapshot.
 pub struct BackupContent {
     pub filename: String,
+    /// Info if file is encrypted (or empty if we do not have that info)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub encrypted: Option<bool>,
     /// Archive size (from backup manifest).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,

@@ -575,6 +602,27 @@ pub struct TaskListItem {
     pub status: Option<String>,
 }

+impl From<crate::server::TaskListInfo> for TaskListItem {
+    fn from(info: crate::server::TaskListInfo) -> Self {
+        let (endtime, status) = info
+            .state
+            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+
+        TaskListItem {
+            upid: info.upid_str,
+            node: "localhost".to_string(),
+            pid: info.upid.pid as i64,
+            pstart: info.upid.pstart,
+            starttime: info.upid.starttime,
+            worker_type: info.upid.worker_type,
+            worker_id: info.upid.worker_id,
+            user: info.upid.username,
+            endtime,
+            status,
+        }
+    }
+}
+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -807,7 +855,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

     for fingerprint in invalid_fingerprints.iter() {
         if let Ok(_) = parse_simple_value(fingerprint, &schema) {
-            bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+            bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
         }
     }

@@ -851,7 +899,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

     for name in invalid_user_ids.iter() {
         if let Ok(_) = parse_simple_value(name, &schema) {
-            bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+            bail!("test userid '{}' failed - got Ok() while exception an error.", name);
         }
     }

@@ -10,6 +10,8 @@ use std::path::PathBuf;
 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
 use proxmox::try_block;

+use crate::tools::epoch_now_u64;
+
 fn compute_csrf_secret_digest(
     timestamp: i64,
     secret: &[u8],

@@ -29,8 +31,7 @@ pub fn assemble_csrf_prevention_token(
     username: &str,
 ) -> String {

-    let epoch = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs() as i64;
+    let epoch = epoch_now_u64().unwrap() as i64;

     let digest = compute_csrf_secret_digest(epoch, secret, username);

@@ -67,8 +68,7 @@ pub fn verify_csrf_prevention_token(
         bail!("invalid signature.");
     }

-    let now = std::time::SystemTime::now().duration_since(
-        std::time::SystemTime::UNIX_EPOCH)?.as_secs() as i64;
+    let now = epoch_now_u64()? as i64;

     let age = now - ttime;
     if age < min_age {

@@ -198,5 +198,11 @@ pub use prune::*;
 mod datastore;
 pub use datastore::*;

+mod verify;
+pub use verify::*;
+
 mod catalog_shell;
 pub use catalog_shell::*;

+mod async_index_reader;
+pub use async_index_reader::*;

src/backup/async_index_reader.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
+use std::future::Future;
+use std::task::{Poll, Context};
+use std::pin::Pin;
+
+use anyhow::Error;
+use futures::future::FutureExt;
+use futures::ready;
+use tokio::io::AsyncRead;
+
+use proxmox::sys::error::io_err_other;
+use proxmox::io_format_err;
+
+use super::IndexFile;
+use super::read_chunk::AsyncReadChunk;
+
+enum AsyncIndexReaderState<S> {
+    NoData,
+    WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
+    HaveData(usize),
+}
+
+pub struct AsyncIndexReader<S, I: IndexFile> {
+    store: Option<S>,
+    index: I,
+    read_buffer: Vec<u8>,
+    current_chunk_idx: usize,
+    current_chunk_digest: [u8; 32],
+    state: AsyncIndexReaderState<S>,
+}
+
+// ok because the only public interfaces operates on &mut Self
+unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}
+
+impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
+    pub fn new(index: I, store: S) -> Self {
+        Self {
+            store: Some(store),
+            index,
+            read_buffer: Vec::with_capacity(1024*1024),
+            current_chunk_idx: 0,
+            current_chunk_digest: [0u8; 32],
+            state: AsyncIndexReaderState::NoData,
+        }
+    }
+}
+
+impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin
+{
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context,
+        buf: &mut [u8],
+    ) -> Poll<tokio::io::Result<usize>> {
+        let this = Pin::get_mut(self);
+        loop {
+            match &mut this.state {
+                AsyncIndexReaderState::NoData => {
+                    if this.current_chunk_idx >= this.index.index_count() {
+                        return Poll::Ready(Ok(0));
+                    }
+
+                    let digest = this
+                        .index
+                        .index_digest(this.current_chunk_idx)
+                        .ok_or(io_format_err!("could not get digest"))?
+                        .clone();
+
+                    if digest == this.current_chunk_digest {
+                        this.state = AsyncIndexReaderState::HaveData(0);
+                        continue;
+                    }
+
+                    this.current_chunk_digest = digest;
+
+                    let store = match this.store.take() {
+                        Some(store) => store,
+                        None => {
+                            return Poll::Ready(Err(io_format_err!("could not find store")));
+                        },
+                    };
+
+                    let future = async move {
+                        store.read_chunk(&digest)
+                            .await
+                            .map(move |x| (store, x))
+                    };
+
+                    this.state = AsyncIndexReaderState::WaitForData(future.boxed());
+                },
+                AsyncIndexReaderState::WaitForData(ref mut future) => {
+                    match ready!(future.as_mut().poll(cx)) {
+                        Ok((store, mut chunk_data)) => {
+                            this.read_buffer.clear();
+                            this.read_buffer.append(&mut chunk_data);
+                            this.state = AsyncIndexReaderState::HaveData(0);
+                            this.store = Some(store);
+                        },
+                        Err(err) => {
+                            return Poll::Ready(Err(io_err_other(err)));
+                        },
+                    };
+                },
+                AsyncIndexReaderState::HaveData(offset) => {
+                    let offset = *offset;
+                    let len = this.read_buffer.len();
+                    let n = if len - offset < buf.len() {
+                        len - offset
+                    } else {
+                        buf.len()
+                    };
+
+                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
+                    if offset + n == len {
+                        this.state = AsyncIndexReaderState::NoData;
+                        this.current_chunk_idx += 1;
+                    } else {
+                        this.state = AsyncIndexReaderState::HaveData(offset + n);
+                    }
+
+                    return Poll::Ready(Ok(n));
+                },
+            }
+        }
+    }
+}
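
AsyncIndexReader cycles NoData → WaitForData → HaveData, handing the chunk store into the in-flight future and taking it back on completion, and serving reads from the buffered chunk until it is exhausted. A simplified synchronous sketch of just the HaveData buffering logic (our own stand-in types, not the crate's):

    // Serve bytes from the current chunk, then advance to the next chunk.
    struct ChunkReader {
        chunks: Vec<Vec<u8>>, // stands in for the index + chunk store
        idx: usize,
        buffer: Vec<u8>,
        offset: usize,
    }

    impl ChunkReader {
        fn read(&mut self, buf: &mut [u8]) -> usize {
            if self.offset == self.buffer.len() {
                if self.idx >= self.chunks.len() {
                    return 0; // EOF, like Poll::Ready(Ok(0))
                }
                self.buffer = self.chunks[self.idx].clone(); // "WaitForData"
                self.offset = 0;
                self.idx += 1;
            }
            let n = (self.buffer.len() - self.offset).min(buf.len());
            buf[..n].copy_from_slice(&self.buffer[self.offset..self.offset + n]);
            self.offset += n;
            n
        }
    }

    fn main() {
        let mut r = ChunkReader {
            chunks: vec![vec![1, 2, 3], vec![4, 5]],
            idx: 0, buffer: Vec::new(), offset: 0,
        };
        let mut out = Vec::new();
        let mut buf = [0u8; 2];
        loop {
            let n = r.read(&mut buf);
            if n == 0 { break; }
            out.extend_from_slice(&buf[..n]);
        }
        assert_eq!(out, vec![1, 2, 3, 4, 5]);
    }
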
@@ -59,17 +59,6 @@ impl BackupGroup {
        &self.backup_id
    }

    pub fn parse(path: &str) -> Result<Self, Error> {

        let cap = GROUP_PATH_REGEX.captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            backup_type: cap.get(1).unwrap().as_str().to_owned(),
            backup_id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }

    pub fn group_path(&self) -> PathBuf {

        let mut relative_path = PathBuf::new();
@@ -152,6 +141,31 @@ impl BackupGroup {
    }
}

impl std::fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let backup_type = self.backup_type();
        let id = self.backup_id();
        write!(f, "{}/{}", backup_type, id)
    }
}

impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group path
    ///
    /// This parses strings like `vm/100`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = GROUP_PATH_REGEX.captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            backup_type: cap.get(1).unwrap().as_str().to_owned(),
            backup_id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}

/// Uniquely identify a Backup (relative to data store)
///
/// We also call this a backup snapshot.
@@ -188,16 +202,6 @@ impl BackupDir {
        self.backup_time
    }

    pub fn parse(path: &str) -> Result<Self, Error> {

        let cap = SNAPSHOT_PATH_REGEX.captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
        Ok(BackupDir::from((group, backup_time.timestamp())))
    }

    pub fn relative_path(&self) -> PathBuf {

        let mut relative_path = self.group.group_path();
@@ -212,6 +216,31 @@ impl BackupDir {
    }
}

impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX.captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
        let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
        Ok(BackupDir::from((group, backup_time.timestamp())))
    }
}

impl std::fmt::Display for BackupDir {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let backup_type = self.group.backup_type();
        let id = self.group.backup_id();
        let time = Self::backup_time_to_string(self.backup_time);
        write!(f, "{}/{}/{}", backup_type, id, time)
    }
}

impl From<(BackupGroup, i64)> for BackupDir {
    fn from((group, timestamp): (BackupGroup, i64)) -> Self {
        Self { group, backup_time: Utc.timestamp(timestamp, 0) }
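The `Display`/`FromStr` pairs above replace the removed inherent `parse()` helpers, so group and snapshot paths now round-trip through the standard traits. A minimal sketch of the new call sites (a hedged illustration; the type paths are assumed to be in scope):

use std::str::FromStr;

fn demo() -> Result<(), anyhow::Error> {
    // "<type>/<id>" parses into a group and prints back identically
    let group: BackupGroup = "vm/100".parse()?;
    assert_eq!(group.to_string(), "vm/100");

    // "<type>/<id>/<timestamp>" parses into a snapshot directory
    let snapshot = BackupDir::from_str("host/elsa/2020-06-15T05:18:33Z")?;
    println!("{}", snapshot);
    Ok(())
}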
@@ -1,23 +1,21 @@
use anyhow::{bail, format_err, Error};
use std::fmt;
use std::ffi::{CStr, CString, OsStr};
use std::os::unix::ffi::OsStrExt;
use std::io::{Read, Write, Seek, SeekFrom};
use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr};
use std::fmt;
use std::io::{Read, Write, Seek, SeekFrom};
use std::os::unix::ffi::OsStrExt;

use anyhow::{bail, format_err, Error};
use chrono::offset::{TimeZone, Local};

use pathpatterns::{MatchList, MatchType};
use proxmox::tools::io::ReadExt;
use proxmox::sys::error::io_err_other;

use crate::pxar::catalog::BackupCatalogWriter;
use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
use crate::tools::runtime::block_on;
use crate::pxar::catalog::BackupCatalogWriter;

#[repr(u8)]
#[derive(Copy,Clone,PartialEq)]
enum CatalogEntryType {
pub(crate) enum CatalogEntryType {
    Directory = b'd',
    File = b'f',
    Symlink = b'l',
@@ -46,6 +44,21 @@ impl TryFrom<u8> for CatalogEntryType {
    }
}

impl From<&DirEntryAttribute> for CatalogEntryType {
    fn from(value: &DirEntryAttribute) -> Self {
        match value {
            DirEntryAttribute::Directory { .. } => CatalogEntryType::Directory,
            DirEntryAttribute::File { .. } => CatalogEntryType::File,
            DirEntryAttribute::Symlink => CatalogEntryType::Symlink,
            DirEntryAttribute::Hardlink => CatalogEntryType::Hardlink,
            DirEntryAttribute::BlockDevice => CatalogEntryType::BlockDevice,
            DirEntryAttribute::CharDevice => CatalogEntryType::CharDevice,
            DirEntryAttribute::Fifo => CatalogEntryType::Fifo,
            DirEntryAttribute::Socket => CatalogEntryType::Socket,
        }
    }
}

impl fmt::Display for CatalogEntryType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", char::from(*self as u8))
@@ -63,7 +76,7 @@ pub struct DirEntry {
}

/// Used to specify additional attributes inside DirEntry
#[derive(Clone, PartialEq)]
#[derive(Clone, Debug, PartialEq)]
pub enum DirEntryAttribute {
    Directory { start: u64 },
    File { size: u64, mtime: u64 },
@@ -106,6 +119,23 @@ impl DirEntry {
    }
}

    /// Get file mode bits for this entry to be used with the `MatchList` api.
    pub fn get_file_mode(&self) -> Option<u32> {
        Some(
            match self.attr {
                DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
                DirEntryAttribute::File { .. } => pxar::mode::IFREG,
                DirEntryAttribute::Symlink => pxar::mode::IFLNK,
                DirEntryAttribute::Hardlink => return None,
                DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
                DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
                DirEntryAttribute::Fifo => pxar::mode::IFIFO,
                DirEntryAttribute::Socket => pxar::mode::IFSOCK,
            }
            as u32
        )
    }

    /// Check if DirEntry is a directory
    pub fn is_directory(&self) -> bool {
        match self.attr {
@@ -383,32 +413,6 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
    }
}

// fixme: move to somewhere else?
/// Implement Write to tokio mpsc channel Sender
pub struct SenderWriter(tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>);

impl SenderWriter {
    pub fn new(sender: tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>) -> Self {
        Self(sender)
    }
}

impl Write for SenderWriter {
    fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
        block_on(async move {
            self.0
                .send(Ok(buf.to_vec()))
                .await
                .map_err(io_err_other)
                .and(Ok(buf.len()))
        })
    }

    fn flush(&mut self) -> Result<(), std::io::Error> {
        Ok(())
    }
}

/// Read Catalog files
pub struct CatalogReader<R> {
    reader: R,
@@ -476,7 +480,7 @@ impl <R: Read + Seek> CatalogReader<R> {
    &mut self,
    parent: &DirEntry,
    filename: &[u8],
) -> Result<DirEntry, Error> {
) -> Result<Option<DirEntry>, Error> {

    let start = match parent.attr {
        DirEntryAttribute::Directory { start } => start,
@@ -496,10 +500,7 @@ impl <R: Read + Seek> CatalogReader<R> {
        Ok(false) // stop parsing
    })?;

    match item {
        None => bail!("no such file"),
        Some(entry) => Ok(entry),
    }
    Ok(item)
}

/// Read the raw directory info block from current reader position.
@@ -532,7 +533,10 @@ impl <R: Read + Seek> CatalogReader<R> {
        self.dump_dir(&path, pos)?;
    }
    CatalogEntryType::File => {
        let dt = Local.timestamp(mtime as i64, 0);
        let dt = Local
            .timestamp_opt(mtime as i64, 0)
            .single() // chrono docs say timestamp_opt can only be None or Single!
            .unwrap_or_else(|| Local.timestamp(0, 0));

        println!(
            "{} {:?} {} {}",
@@ -555,38 +559,30 @@ impl <R: Read + Seek> CatalogReader<R> {
/// provided callback on them.
pub fn find(
    &mut self,
    mut entry: &mut Vec<DirEntry>,
    pattern: &[MatchPatternSlice],
    callback: &Box<fn(&[DirEntry])>,
    parent: &DirEntry,
    file_path: &mut Vec<u8>,
    match_list: &impl MatchList, //&[MatchEntry],
    callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
) -> Result<(), Error> {
    let parent = entry.last().unwrap();
    if !parent.is_directory() {
        return Ok(())
    }

    let file_len = file_path.len();
    for e in self.read_dir(parent)? {
        match MatchPatternSlice::match_filename_include(
            &CString::new(e.name.clone())?,
            e.is_directory(),
            pattern,
        )? {
            (MatchType::Positive, _) => {
                entry.push(e);
                callback(&entry);
                let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
                let child_pattern = vec![pattern.as_slice()];
                self.find(&mut entry, &child_pattern, callback)?;
                entry.pop();
            }
            (MatchType::PartialPositive, child_pattern)
            | (MatchType::PartialNegative, child_pattern) => {
                entry.push(e);
                self.find(&mut entry, &child_pattern, callback)?;
                entry.pop();
            }
            _ => {}
        let is_dir = e.is_directory();
        file_path.truncate(file_len);
        if !e.name.starts_with(b"/") {
            file_path.reserve(e.name.len() + 1);
            file_path.push(b'/');
        }
        file_path.extend(&e.name);
        match match_list.matches(&file_path, e.get_file_mode()) {
            Some(MatchType::Exclude) => continue,
            Some(MatchType::Include) => callback(&file_path)?,
            None => (),
        }
        if is_dir {
            self.find(&e, file_path, match_list, callback)?;
        }
    }
    file_path.truncate(file_len);

    Ok(())
}
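The rewritten `find` threads an explicit byte-path buffer and a `pathpatterns::MatchList` through the recursion instead of the old `MatchPattern` machinery, and reports matches through a fallible callback. A hedged sketch of a call site (the `root()` accessor and pattern construction follow the `pathpatterns` API as used elsewhere in the codebase; treat the details as assumptions):

use pathpatterns::{MatchEntry, MatchType, PatternFlag};

let match_list = vec![
    MatchEntry::parse_pattern("etc/**/*.conf", PatternFlag::PATH_NAME, MatchType::Include)?,
];
let root = catalog_reader.root()?;
let mut file_path = Vec::new();
catalog_reader.find(&root, &mut file_path, &match_list, &mut |path: &[u8]| {
    // paths arrive as raw bytes, prefixed with '/' by the walker above
    println!("{}", String::from_utf8_lossy(path));
    Ok(())
})?;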
File diff suppressed because it is too large
@@ -429,6 +429,10 @@ impl ChunkStore {
        full_path
    }

    pub fn name(&self) -> &str {
        &self.name
    }

    pub fn base_path(&self) -> PathBuf {
        self.base.clone()
    }
@@ -167,7 +167,7 @@ impl DataBlob {
    }

    /// Decode blob data
    pub fn decode(self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {

        let magic = self.magic();

@@ -311,7 +311,9 @@ impl DataBlob {
    /// Verify digest and data length for unencrypted chunks.
    ///
    /// To do that, we need to decompress data first. Please note that
    /// this is noth possible for encrypted chunks.
    /// this is not possible for encrypted chunks. This function simply returns Ok
    /// for encrypted chunks.
    /// Note: This does not call verify_crc
    pub fn verify_unencrypted(
        &self,
        expected_chunk_size: usize,
@@ -320,22 +322,18 @@ impl DataBlob {

        let magic = self.magic();

        let verify_raw_data = |data: &[u8]| {
            if expected_chunk_size != data.len() {
                bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
            }
            let digest = openssl::sha::sha256(data);
            if &digest != expected_digest {
                bail!("detected chunk with wrong digest.");
            }
            Ok(())
        };
        if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
            return Ok(());
        }

        if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
            let data = zstd::block::decompress(&self.raw_data[12..], 16*1024*1024)?;
            verify_raw_data(&data)?;
        } else if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
            verify_raw_data(&self.raw_data[12..])?;
        let data = self.decode(None)?;

        if expected_chunk_size != data.len() {
            bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
        }
        let digest = openssl::sha::sha256(&data);
        if &digest != expected_digest {
            bail!("detected chunk with wrong digest.");
        }

        Ok(())
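The rewrite drops the `verify_raw_data` closure and funnels compressed and uncompressed chunks alike through `decode(None)`, which the switch of `decode` to `&self` makes possible. A hedged round-trip sketch (the exact `DataChunkBuilder` return shape is an assumption, not part of this hunk):

// Build an unencrypted chunk, then run the new verification path.
let data = vec![42u8; 4096];
let (chunk, digest) = DataChunkBuilder::new(&data).build()?; // assumed builder API
chunk.verify_crc()?;                            // separate, cheaper CRC check
chunk.verify_unencrypted(data.len(), &digest)?; // decode, then length + sha256 compare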
@@ -19,6 +19,10 @@ pub struct DataBlobReader<R: Read> {
    state: BlobReaderState<R>,
}

// zstd_safe::DCtx is not sync but we are, since
// the only public interface is on mutable reference
unsafe impl<R: Read> Sync for DataBlobReader<R> {}

impl <R: Read> DataBlobReader<R> {

    pub fn new(mut reader: R, config: Option<Arc<CryptConfig>>) -> Result<Self, Error> {
@@ -2,6 +2,7 @@ use std::collections::{HashSet, HashMap};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::convert::TryFrom;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
@@ -11,7 +12,7 @@ use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::manifest::{MANIFEST_BLOB_NAME, BackupManifest};
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;
@@ -134,6 +135,10 @@ impl DataStore {
        Ok(out)
    }

    pub fn name(&self) -> &str {
        self.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }
@@ -149,6 +154,7 @@ impl DataStore {

        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });

        for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
@@ -469,4 +475,28 @@ impl DataStore {
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
        let blob = self.chunk_store.read_chunk(digest)?;
        blob.verify_crc()?;
        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
        Ok(())
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
        let raw_size = raw_data.len() as u64;
        let blob = DataBlob::from_raw(raw_data)?;
        Ok((blob, raw_size))
    }

    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, raw_size))
    }
}
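These helpers give the new verify code a single entry point per snapshot: load the manifest, then check each referenced file and chunk. A hedged sketch of the intended flow (`lookup_datastore` and the snapshot handle are assumptions based on the surrounding codebase):

let datastore = DataStore::lookup_datastore("store1")?;
let (manifest, _raw_size) = datastore.load_manifest(&backup_dir)?;
for info in manifest.files() {
    println!("archive {} ({} bytes)", info.filename, info.size);
}
// Spot-check a single chunk: CRC first, then decoded length + digest.
datastore.verify_stored_chunk(&chunk_digest, expected_size)?;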
@@ -1,23 +1,28 @@
use std::convert::TryInto;
use std::fs::File;
use std::io::{BufWriter, Seek, SeekFrom, Write};
use std::io::{self, BufWriter, Seek, SeekFrom, Write};
use std::ops::Range;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use std::task::Context;
use std::pin::Pin;

use anyhow::{bail, format_err, Error};

use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
use proxmox::tools::vec;
use proxmox::tools::mmap::Mmap;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

use super::chunk_stat::ChunkStat;
use super::chunk_store::ChunkStore;
use super::index::ChunkReadInfo;
use super::read_chunk::ReadChunk;
use super::Chunker;
use super::IndexFile;
use super::{DataBlob, DataChunkBuilder};
use crate::tools;
use crate::tools::{self, epoch_now_u64};

/// Header format definition for dynamic index files (`.dixd`)
#[repr(C)]
@@ -36,34 +41,34 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
// pub data: DynamicIndexHeaderData,
// }

#[derive(Clone, Debug)]
#[repr(C)]
pub struct DynamicEntry {
    end_le: u64,
    digest: [u8; 32],
}

impl DynamicEntry {
    #[inline]
    pub fn end(&self) -> u64 {
        u64::from_le(self.end_le)
    }
}

pub struct DynamicIndexReader {
    _file: File,
    pub size: usize,
    index: *const u8,
    index_entries: usize,
    index: Mmap<DynamicEntry>,
    pub uuid: [u8; 16],
    pub ctime: u64,
    pub index_csum: [u8; 32],
}

// `index` is mmap()ed which cannot be thread-local so should be sendable
// FIXME: Introduce an mmap wrapper type for this?
unsafe impl Send for DynamicIndexReader {}
unsafe impl Sync for DynamicIndexReader {}

impl Drop for DynamicIndexReader {
    fn drop(&mut self) {
        if let Err(err) = self.unmap() {
            eprintln!("Unable to unmap dynamic index - {}", err);
        }
    }
}

impl DynamicIndexReader {
    pub fn open(path: &Path) -> Result<Self, Error> {
        File::open(path)
            .map_err(Error::from)
            .and_then(|file| Self::new(file))
            .and_then(Self::new)
            .map_err(|err| format_err!("Unable to open dynamic index {:?} - {}", path, err))
    }

@@ -74,6 +79,7 @@ impl DynamicIndexReader {
        bail!("unable to get shared lock - {}", err);
    }

    // FIXME: This is NOT OUR job! Check the callers of this method and remove this!
    file.seek(SeekFrom::Start(0))?;

    let header_size = std::mem::size_of::<DynamicIndexHeader>();
@@ -93,123 +99,49 @@ impl DynamicIndexReader {
    let size = stat.st_size as usize;

    let index_size = size - header_size;
    if (index_size % 40) != 0 {
    let index_count = index_size / 40;
    if index_count * 40 != index_size {
        bail!("got unexpected file size");
    }

    let data = unsafe {
        nix::sys::mman::mmap(
            std::ptr::null_mut(),
            index_size,
    let index = unsafe {
        Mmap::map_fd(
            rawfd,
            header_size as u64,
            index_count,
            nix::sys::mman::ProtFlags::PROT_READ,
            nix::sys::mman::MapFlags::MAP_PRIVATE,
            rawfd,
            header_size as i64,
        )
    }? as *const u8;
        )?
    };

    Ok(Self {
        _file: file,
        size,
        index: data,
        index_entries: index_size / 40,
        index,
        ctime,
        uuid: header.uuid,
        index_csum: header.index_csum,
    })
}

fn unmap(&mut self) -> Result<(), Error> {
    if self.index == std::ptr::null_mut() {
        return Ok(());
    }

    if let Err(err) = unsafe {
        nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, self.index_entries * 40)
    } {
        bail!("unmap dynamic index failed - {}", err);
    }

    self.index = std::ptr::null_mut();

    Ok(())
}

#[allow(clippy::cast_ptr_alignment)]
pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
    if pos >= self.index_entries {
        bail!("chunk index out of range");
    }
    let start = if pos == 0 {
        0
    } else {
        unsafe { *(self.index.add((pos - 1) * 40) as *const u64) }
    };

    let end = unsafe { *(self.index.add(pos * 40) as *const u64) };

    let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
    unsafe {
        std::ptr::copy_nonoverlapping(
            self.index.add(pos * 40 + 8),
            (*digest.as_mut_ptr()).as_mut_ptr(),
            32,
        );
    }

    Ok((start, end, unsafe { digest.assume_init() }))
}

#[inline]
#[allow(clippy::cast_ptr_alignment)]
fn chunk_end(&self, pos: usize) -> u64 {
    if pos >= self.index_entries {
    if pos >= self.index.len() {
        panic!("chunk index out of range");
    }
    unsafe { *(self.index.add(pos * 40) as *const u64) }
    self.index[pos].end()
}

#[inline]
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
    if pos >= self.index_entries {
    if pos >= self.index.len() {
        panic!("chunk index out of range");
    }
    let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 40 + 8), 32) };
    slice.try_into().unwrap()
    &self.index[pos].digest
}

/// Compute checksum and data size
pub fn compute_csum(&self) -> ([u8; 32], u64) {
    let mut csum = openssl::sha::Sha256::new();
    let mut chunk_end = 0;
    for pos in 0..self.index_entries {
        chunk_end = self.chunk_end(pos);
        let digest = self.chunk_digest(pos);
        csum.update(&chunk_end.to_le_bytes());
        csum.update(digest);
    }
    let csum = csum.finish();

    (csum, chunk_end)
}

/*
pub fn dump_pxar(&self, mut writer: Box<dyn Write>) -> Result<(), Error> {

    for pos in 0..self.index_entries {
        let _end = self.chunk_end(pos);
        let digest = self.chunk_digest(pos);
        //println!("Dump {:08x}", end );
        let chunk = self.store.read_chunk(digest)?;
        // fixme: handle encrypted chunks
        let data = chunk.decode(None)?;
        writer.write_all(&data)?;
    }

    Ok(())
}
*/

// TODO: can we use std::slice::binary_search with Mmap now?
fn binary_search(
    &self,
    start_idx: usize,
@@ -238,11 +170,11 @@ impl DynamicIndexReader {

impl IndexFile for DynamicIndexReader {
    fn index_count(&self) -> usize {
        self.index_entries
        self.index.len()
    }

    fn index_digest(&self, pos: usize) -> Option<&[u8; 32]> {
        if pos >= self.index_entries {
        if pos >= self.index.len() {
            None
        } else {
            Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
@@ -250,12 +182,59 @@ impl IndexFile for DynamicIndexReader {
    }

    fn index_bytes(&self) -> u64 {
        if self.index_entries == 0 {
        if self.index.is_empty() {
            0
        } else {
            self.chunk_end((self.index_entries - 1) as usize)
            self.chunk_end(self.index.len() - 1)
        }
    }

    fn compute_csum(&self) -> ([u8; 32], u64) {
        let mut csum = openssl::sha::Sha256::new();
        let mut chunk_end = 0;
        for pos in 0..self.index_count() {
            let info = self.chunk_info(pos).unwrap();
            chunk_end = info.range.end;
            csum.update(&chunk_end.to_le_bytes());
            csum.update(&info.digest);
        }
        let csum = csum.finish();
        (csum, chunk_end)
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
        if pos >= self.index.len() {
            return None;
        }
        let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };

        let end = self.index[pos].end();

        Some(ChunkReadInfo {
            range: start..end,
            digest: self.index[pos].digest.clone(),
        })
    }
}

struct CachedChunk {
    range: Range<u64>,
    data: Vec<u8>,
}

impl CachedChunk {
    /// Perform sanity checks on the range and data size:
    pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
        if data.len() as u64 != range.end - range.start {
            bail!(
                "read chunk with wrong size ({} != {})",
                data.len(),
                range.end - range.start,
            );
        }
        Ok(Self { range, data })
    }
}
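With `chunk_info` promoted onto the `IndexFile` trait (see the trait change in index.rs further below), callers can walk any index generically without touching the mmap details. A small sketch using only the trait methods shown in this diff:

// Sum the chunk sizes of any index, fixed or dynamic.
fn total_chunk_bytes(index: &dyn IndexFile) -> u64 {
    let mut total = 0;
    for pos in 0..index.index_count() {
        let info = index.chunk_info(pos).unwrap(); // pos < index_count, so always Some
        total += info.range.end - info.range.start;
    }
    total
}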
pub struct BufferedDynamicReader<S> {
@@ -266,7 +245,7 @@ pub struct BufferedDynamicReader<S> {
    buffered_chunk_idx: usize,
    buffered_chunk_start: u64,
    read_offset: u64,
    lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
    lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
}

struct ChunkCacher<'a, S> {
@@ -274,16 +253,21 @@ struct ChunkCacher<'a, S> {
    index: &'a DynamicIndexReader,
}

impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
    fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
        let (start, end, digest) = self.index.chunk_info(index)?;
        self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
    fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
        let info = match self.index.chunk_info(index) {
            Some(info) => info,
            None => bail!("chunk index out of range"),
        };
        let range = info.range;
        let data = self.store.read_chunk(&info.digest)?;
        CachedChunk::new(range, data).map(Some)
    }
}

impl<S: ReadChunk> BufferedDynamicReader<S> {
    pub fn new(index: DynamicIndexReader, store: S) -> Self {
        let archive_size = index.chunk_end(index.index_entries - 1);
        let archive_size = index.index_bytes();
        Self {
            store,
            index,
@@ -301,7 +285,8 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
    }

    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
        let (start, end, data) = self.lru_cache.access(
        //let (start, end, data) = self.lru_cache.access(
        let cached_chunk = self.lru_cache.access(
            idx,
            &mut ChunkCacher {
                store: &mut self.store,
@@ -309,21 +294,13 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
            },
        )?.ok_or_else(|| format_err!("chunk not found by cacher"))?;

        if (*end - *start) != data.len() as u64 {
            bail!(
                "read chunk with wrong size ({} != {}",
                (*end - *start),
                data.len()
            );
        }

        // fixme: avoid copy
        self.read_buffer.clear();
        self.read_buffer.extend_from_slice(&data);
        self.read_buffer.extend_from_slice(&cached_chunk.data);

        self.buffered_chunk_idx = idx;

        self.buffered_chunk_start = *start;
        self.buffered_chunk_start = cached_chunk.range.start;
        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
        Ok(())
    }
@@ -340,7 +317,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {

        // optimization for sequential read
        if buffer_len > 0
            && ((self.buffered_chunk_idx + 1) < index.index_entries)
            && ((self.buffered_chunk_idx + 1) < index.index.len())
            && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
        {
            let next_idx = self.buffered_chunk_idx + 1;
@@ -356,7 +333,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
            || (offset < self.buffered_chunk_start)
            || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
        {
            let end_idx = index.index_entries - 1;
            let end_idx = index.index.len() - 1;
            let end = index.chunk_end(end_idx);
            let idx = index.binary_search(0, 0, end_idx, end, offset)?;
            self.buffer_chunk(idx)?;
@@ -383,9 +360,7 @@ impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
            data.len()
        };

        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
        }
        buf[0..n].copy_from_slice(&data[0..n]);

        self.read_offset += n as u64;

@@ -417,6 +392,49 @@ impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
    }
}

/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
/// async use!
///
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
/// so that we can properly access it from multiple threads simultaneously while not issuing
/// duplicate simultaneous reads over http.
#[derive(Clone)]
pub struct LocalDynamicReadAt<R: ReadChunk> {
    inner: Arc<Mutex<BufferedDynamicReader<R>>>,
}

impl<R: ReadChunk> LocalDynamicReadAt<R> {
    pub fn new(inner: BufferedDynamicReader<R>) -> Self {
        Self {
            inner: Arc::new(Mutex::new(inner)),
        }
    }
}

impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
    fn start_read_at<'a>(
        self: Pin<&'a Self>,
        _cx: &mut Context,
        buf: &'a mut [u8],
        offset: u64,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        use std::io::Read;
        MaybeReady::Ready(tokio::task::block_in_place(move || {
            let mut reader = self.inner.lock().unwrap();
            reader.seek(SeekFrom::Start(offset))?;
            Ok(reader.read(buf)?)
        }))
    }

    fn poll_complete<'a>(
        self: Pin<&'a Self>,
        _op: ReadAtOperation<'a>,
    ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
        panic!("LocalDynamicReadAt::start_read_at returned Pending");
    }
}

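As the doc comment says, this is a stop-gap: the synchronous `BufferedDynamicReader` is parked behind an `Arc<Mutex<..>>` and served to the `pxar` accessor through `block_in_place`, which also means it must run on tokio's multi-threaded runtime. A hedged wiring sketch (the `archive_size` helper and the `pxar` accessor constructor are assumptions from the surrounding codebase):

let index = DynamicIndexReader::open(&archive_path)?;
let chunk_reader = LocalChunkReader::new(datastore.clone(), None);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size(); // assumed accessor on the buffered reader
let reader = LocalDynamicReadAt::new(reader);
let accessor = pxar::accessor::aio::Accessor::new(reader, archive_size).await?;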
/// Create dynamic index files (`.dixd`)
pub struct DynamicIndexWriter {
    store: Arc<ChunkStore>,
@@ -460,9 +478,7 @@ impl DynamicIndexWriter {
        panic!("got unexpected header size");
    }

    let ctime = std::time::SystemTime::now()
        .duration_since(std::time::SystemTime::UNIX_EPOCH)?
        .as_secs();
    let ctime = epoch_now_u64()?;

    let uuid = Uuid::generate();

@@ -1,11 +1,10 @@
use anyhow::{bail, format_err, Error};
use std::convert::TryInto;
use std::io::{Seek, SeekFrom};

use super::chunk_stat::*;
use super::chunk_store::*;
use super::IndexFile;
use crate::tools;
use super::{IndexFile, ChunkReadInfo};
use crate::tools::{self, epoch_now_u64};

use chrono::{Local, TimeZone};
use std::fs::File;
@@ -147,38 +146,6 @@ impl FixedIndexReader {
    Ok(())
}

pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
    if pos >= self.index_length {
        bail!("chunk index out of range");
    }
    let start = (pos * self.chunk_size) as u64;
    let mut end = start + self.chunk_size as u64;

    if end > self.size {
        end = self.size;
    }

    let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
    unsafe {
        std::ptr::copy_nonoverlapping(
            self.index.add(pos * 32),
            (*digest.as_mut_ptr()).as_mut_ptr(),
            32,
        );
    }

    Ok((start, end, unsafe { digest.assume_init() }))
}

#[inline]
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
    if pos >= self.index_length {
        panic!("chunk index out of range");
    }
    let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
    slice.try_into().unwrap()
}

#[inline]
fn chunk_end(&self, pos: usize) -> u64 {
    if pos >= self.index_length {
@@ -193,20 +160,6 @@ impl FixedIndexReader {
    }
}

/// Compute checksum and data size
pub fn compute_csum(&self) -> ([u8; 32], u64) {
    let mut csum = openssl::sha::Sha256::new();
    let mut chunk_end = 0;
    for pos in 0..self.index_length {
        chunk_end = ((pos + 1) * self.chunk_size) as u64;
        let digest = self.chunk_digest(pos);
        csum.update(digest);
    }
    let csum = csum.finish();

    (csum, chunk_end)
}

pub fn print_info(&self) {
    println!("Size: {}", self.size);
    println!("ChunkSize: {}", self.chunk_size);
@@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
    fn index_bytes(&self) -> u64 {
        self.size
    }

    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
        if pos >= self.index_length {
            return None;
        }

        let start = (pos * self.chunk_size) as u64;
        let mut end = start + self.chunk_size as u64;

        if end > self.size {
            end = self.size;
        }

        let digest = self.index_digest(pos).unwrap();
        Some(ChunkReadInfo {
            range: start..end,
            digest: *digest,
        })
    }

    fn compute_csum(&self) -> ([u8; 32], u64) {
        let mut csum = openssl::sha::Sha256::new();
        let mut chunk_end = 0;
        for pos in 0..self.index_count() {
            let info = self.chunk_info(pos).unwrap();
            chunk_end = info.range.end;
            csum.update(&info.digest);
        }
        let csum = csum.finish();

        (csum, chunk_end)
    }
}

pub struct FixedIndexWriter {
@@ -290,9 +275,7 @@ impl FixedIndexWriter {
        panic!("got unexpected header size");
    }

    let ctime = std::time::SystemTime::now()
        .duration_since(std::time::SystemTime::UNIX_EPOCH)?
        .as_secs();
    let ctime = epoch_now_u64()?;

    let uuid = Uuid::generate();

@@ -469,6 +452,18 @@ impl FixedIndexWriter {

    Ok(())
}

pub fn clone_data_from(&mut self, reader: &FixedIndexReader) -> Result<(), Error> {
    if self.index_length != reader.index_count() {
        bail!("clone_data_from failed - index sizes not equal");
    }

    for i in 0..self.index_length {
        self.add_digest(i, reader.index_digest(i).unwrap())?;
    }

    Ok(())
}
}

pub struct BufferedFixedReader<S> {
@@ -501,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {

    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
        let index = &self.index;
        let (start, end, digest) = index.chunk_info(idx)?;
        let info = match index.chunk_info(idx) {
            Some(info) => info,
            None => bail!("chunk index out of range"),
        };

        // fixme: avoid copy

        let data = self.store.read_chunk(&digest)?;

        if (end - start) != data.len() as u64 {
            bail!(
                "read chunk with wrong size ({} != {}",
                (end - start),
                data.len()
            );
        let data = self.store.read_chunk(&info.digest)?;
        let size = info.range.end - info.range.start;
        if size != data.len() as u64 {
            bail!("read chunk with wrong size ({} != {})", size, data.len());
        }

        self.read_buffer.clear();
@@ -520,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {

        self.buffered_chunk_idx = idx;

        self.buffered_chunk_start = start as u64;
        //println!("BUFFER {} {}", self.buffered_chunk_start, end);
        self.buffered_chunk_start = info.range.start as u64;
        Ok(())
    }
}
@@ -1,10 +1,17 @@
use std::collections::HashMap;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::ops::Range;

use bytes::{Bytes, BytesMut};
use anyhow::{format_err, Error};
use futures::*;

pub struct ChunkReadInfo {
    pub range: Range<u64>,
    pub digest: [u8; 32],
}

impl ChunkReadInfo {
    #[inline]
    pub fn size(&self) -> u64 {
        self.range.end - self.range.start
    }
}

/// Trait to get digest list from index files
///
@@ -13,6 +20,10 @@ pub trait IndexFile {
    fn index_count(&self) -> usize;
    fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
    fn index_bytes(&self) -> u64;
    fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;

    /// Compute index checksum and size
    fn compute_csum(&self) -> ([u8; 32], u64);

    /// Returns most often used chunks
    fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
@@ -46,111 +57,3 @@ pub trait IndexFile {
        map
    }
}

/// Encode digest list from an `IndexFile` into a binary stream
///
/// The reader simply returns a binary stream of 32 byte digest values.
pub struct DigestListEncoder {
    index: Box<dyn IndexFile + Send + Sync>,
    pos: usize,
    count: usize,
}

impl DigestListEncoder {

    pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
        let count = index.index_count();
        Self { index, pos: 0, count }
    }
}

impl std::io::Read for DigestListEncoder {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        if buf.len() < 32 {
            panic!("read buffer too small");
        }

        if self.pos < self.count {
            let mut written = 0;
            loop {
                let digest = self.index.index_digest(self.pos).unwrap();
                buf[written..(written + 32)].copy_from_slice(digest);
                self.pos += 1;
                written += 32;
                if self.pos >= self.count {
                    break;
                }
                if (written + 32) >= buf.len() {
                    break;
                }
            }
            Ok(written)
        } else {
            Ok(0)
        }
    }
}

/// Decodes a Stream<Item=Bytes> into Stream<Item=[u8; 32]>
///
/// The reader simply returns a binary stream of 32 byte digest values.

pub struct DigestListDecoder<S: Unpin> {
    input: S,
    buffer: BytesMut,
}

impl<S: Unpin> DigestListDecoder<S> {
    pub fn new(input: S) -> Self {
        Self { input, buffer: BytesMut::new() }
    }
}

impl<S: Unpin> Unpin for DigestListDecoder<S> {}

impl<S: Unpin, E> Stream for DigestListDecoder<S>
where
    S: Stream<Item=Result<Bytes, E>>,
    E: Into<Error>,
{
    type Item = Result<[u8; 32], Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();

        loop {
            if this.buffer.len() >= 32 {
                let left = this.buffer.split_to(32);

                let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
                unsafe {
                    (*digest.as_mut_ptr()).copy_from_slice(&left[..]);
                    return Poll::Ready(Some(Ok(digest.assume_init())));
                }
            }

            match Pin::new(&mut this.input).poll_next(cx) {
                Poll::Pending => {
                    return Poll::Pending;
                }
                Poll::Ready(Some(Err(err))) => {
                    return Poll::Ready(Some(Err(err.into())));
                }
                Poll::Ready(Some(Ok(data))) => {
                    this.buffer.extend_from_slice(&data);
                    // continue
                }
                Poll::Ready(None) => {
                    let rest = this.buffer.len();
                    if rest == 0 {
                        return Poll::Ready(None);
                    }
                    return Poll::Ready(Some(Err(format_err!(
                        "got small digest ({} != 32).",
                        rest,
                    ))));
                }
            }
        }
    }
}
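The `DigestListEncoder`/`DigestListDecoder` pair, which the final hunk drops from this file, round-trips an index's digest list over a plain byte stream. A hedged usage sketch, with the transport stream assumed:

use futures::StreamExt;

// Server side: wrap an index so its digests can be served as one binary stream.
let encoder = DigestListEncoder::new(Box::new(index));

// Client side: decode the received byte stream back into 32-byte digests.
let mut decoder = DigestListDecoder::new(byte_stream); // any Stream<Item = Result<Bytes, E>>
while let Some(digest) = decoder.next().await {
    println!("{}", proxmox::tools::digest_to_hex(&digest?));
}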
@@ -7,9 +7,11 @@ use serde_json::{json, Value};
use crate::backup::BackupDir;

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

pub struct FileInfo {
    pub filename: String,
    pub encrypted: Option<bool>,
    pub size: u64,
    pub csum: [u8; 32],
}
@@ -47,9 +49,9 @@ impl BackupManifest {
        Self { files: Vec::new(), snapshot }
    }

    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32]) -> Result<(), Error> {
    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
        let _archive_type = archive_type(&filename)?; // check type
        self.files.push(FileInfo { filename, size, csum });
        self.files.push(FileInfo { filename, size, csum, encrypted });
        Ok(())
    }

@@ -72,7 +74,7 @@ impl BackupManifest {
        let info = self.lookup_file_info(name)?;

        if size != info.size {
            bail!("wrong size for file '{}' ({} != {}", name, info.size, size);
            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
        }

        if csum != &info.csum {
@@ -89,11 +91,18 @@ impl BackupManifest {
        "backup-time": self.snapshot.backup_time().timestamp(),
        "files": self.files.iter()
            .fold(Vec::new(), |mut acc, info| {
                acc.push(json!({
                let mut value = json!({
                    "filename": info.filename,
                    "encrypted": info.encrypted,
                    "size": info.size,
                    "csum": proxmox::tools::digest_to_hex(&info.csum),
                }));
                });

                if let Some(encrypted) = info.encrypted {
                    value["encrypted"] = encrypted.into();
                }

                acc.push(value);
                acc
            })
    })
@@ -133,7 +142,8 @@ impl TryFrom<Value> for BackupManifest {
        let csum = required_string_property(item, "csum")?;
        let csum = proxmox::tools::hex_to_digest(csum)?;
        let size = required_integer_property(item, "size")? as u64;
        manifest.add_file(filename, size, csum)?;
        let encrypted = item["encrypted"].as_bool();
        manifest.add_file(filename, size, csum, encrypted)?;
    }

    if manifest.files().is_empty() {
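With the optional flag in place, a serialized manifest file entry looks roughly like this (values illustrative, sketched with serde_json's `json!` macro):

let entry = json!({
    "filename": "drive-scsi0.img.fidx",
    "size": 34359738368u64,
    "csum": "<sha256 hex>",
    "encrypted": true, // key omitted entirely when the client did not report it
});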
@@ -1,38 +1,39 @@
use anyhow::{Error};
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

use super::datastore::*;
use super::crypt_config::*;
use super::data_blob::*;
use anyhow::Error;

use super::crypt_config::CryptConfig;
use super::data_blob::DataBlob;
use super::datastore::DataStore;

/// The ReadChunk trait allows reading backup data chunks (local or remote)
pub trait ReadChunk {
    /// Returns the encoded chunk data
    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error>;

    /// Returns the decoded chunk data
    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
}

#[derive(Clone)]
pub struct LocalChunkReader {
    store: Arc<DataStore>,
    crypt_config: Option<Arc<CryptConfig>>,
}

impl LocalChunkReader {

    pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
        Self { store, crypt_config }
        Self {
            store,
            crypt_config,
        }
    }
}

impl ReadChunk for LocalChunkReader {

    fn read_raw_chunk(&mut self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let digest_str = proxmox::tools::digest_to_hex(digest);
        println!("READ CHUNK {}", digest_str);

    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (path, _) = self.store.chunk_path(digest);
        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
        let chunk = DataBlob::from_raw(raw_data)?;
@@ -41,13 +42,59 @@ impl ReadChunk for LocalChunkReader {
        Ok(chunk)
    }

    fn read_chunk(&mut self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        let chunk = self.read_raw_chunk(digest)?;
    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        let chunk = ReadChunk::read_raw_chunk(self, digest)?;

        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

        // fixme: verify digest?

        Ok(raw_data)
    }
}

pub trait AsyncReadChunk: Send {
    /// Returns the encoded chunk data
    fn read_raw_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;

    /// Returns the decoded chunk data
    fn read_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
}

impl AsyncReadChunk for LocalChunkReader {
    fn read_raw_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
        Box::pin(async move {
            let (path, _) = self.store.chunk_path(digest);

            let raw_data = tokio::fs::read(&path).await?;
            let chunk = DataBlob::from_raw(raw_data)?;
            chunk.verify_crc()?;

            Ok(chunk)
        })
    }

    fn read_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
        Box::pin(async move {
            let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;

            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

            // fixme: verify digest?

            Ok(raw_data)
        })
    }
}
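Because `LocalChunkReader` now implements both the sync and the async trait, call sites have to name the trait explicitly, just as the impls above do internally. A minimal sketch:

async fn fetch(reader: &LocalChunkReader, digest: &[u8; 32]) -> Result<Vec<u8>, anyhow::Error> {
    // Fully qualified call disambiguates ReadChunk::read_chunk vs AsyncReadChunk::read_chunk.
    let data = AsyncReadChunk::read_chunk(reader, digest).await?;
    Ok(data)
}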
src/backup/verify.rs (new file, 196 lines)
@@ -0,0 +1,196 @@
use anyhow::{bail, Error};

use crate::server::WorkerTask;

use super::{
    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
    FileInfo, ArchiveType, archive_type,
};

fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;

    let csum = openssl::sha::sha256(blob.raw_data());
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    blob.verify_crc()?;

    let magic = blob.magic();

    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
        return Ok(());
    }

    blob.decode(None)?;

    Ok(())
}

fn verify_index_chunks(
    datastore: &DataStore,
    index: Box<dyn IndexFile>,
    worker: &WorkerTask,
) -> Result<(), Error> {

    for pos in 0..index.index_count() {

        worker.fail_on_abort()?;

        let info = index.chunk_info(pos).unwrap();
        let size = info.range.end - info.range.start;
        datastore.verify_stored_chunk(&info.digest, size)?;
    }

    Ok(())
}

fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_fixed_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_dynamic_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

/// Verify a single backup snapshot
///
/// This checks all archives inside a backup snapshot.
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));

    let mut error_count = 0;

    for info in manifest.files() {
        let result = proxmox::try_block!({
            worker.log(format!(" check {}", info.filename));
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
            }
        });

        worker.fail_on_abort()?;

        if let Err(err) = result {
            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a backup group
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {

    let mut list = match group.list_backups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify group {}:{}", datastore.name(), group));

    let mut error_count = 0;

    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a datastore
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {

    let list = match BackupGroup::list_groups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
            return Ok(false);
        }
    };

    worker.log(format!("verify datastore {}", datastore.name()));

    let mut error_count = 0;
    for group in list {
        if !verify_backup_group(datastore, &group, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}
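These entry points are designed to run inside a `WorkerTask`, as the datastore verify API endpoint does. A hedged sketch of kicking one off (the `WorkerTask::new_thread` signature follows the crate's existing worker API; treat the exact parameters as assumptions):

let datastore = DataStore::lookup_datastore("store1")?;
let upid_str = WorkerTask::new_thread(
    "verify", Some("store1".to_string()), "root@pam", false,
    move |worker| {
        let success = verify_all_backups(&datastore, &worker)?;
        if !success {
            bail!("verification failed - please check the log for details");
        }
        Ok(())
    },
)?;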
@@ -14,6 +14,8 @@ use proxmox_backup::config;
use proxmox_backup::buildcfg;

fn main() {
    proxmox_backup::tools::setup_safe_path_env();

    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
File diff suppressed because it is too large
@@ -32,6 +32,24 @@ async fn view_task_result(
    Ok(())
}

// Note: local workers should print logs to stdout, so there is no need
// to fetch/display logs. We just wait for the worker to finish.
pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {

    let upid: proxmox_backup::server::UPID = upid_str.parse()?;

    let sleep_duration = core::time::Duration::new(0, 100_000_000);

    loop {
        if proxmox_backup::server::worker_is_active_local(&upid) {
            tokio::time::delay_for(sleep_duration).await;
        } else {
            break;
        }
    }
    Ok(())
}

fn connect() -> Result<HttpClient, Error> {

    let uid = nix::unistd::Uid::current();
@@ -301,11 +319,48 @@ async fn pull_datastore(
    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "store": {
                schema: DATASTORE_SCHEMA,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Verify backups
async fn verify(
    store: String,
    param: Value,
) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let mut client = connect()?;

    let args = json!({});

    let path = format!("api2/json/admin/datastore/{}/verify", store);

    let result = client.post(&path, Some(args)).await?;

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

fn main() {

    proxmox_backup::tools::setup_safe_path_env();

    let cmd_def = CliCommandMap::new()
        .insert("acl", acl_commands())
        .insert("datastore", datastore_commands())
        .insert("disk", disk_commands())
        .insert("dns", dns_commands())
        .insert("network", network_commands())
        .insert("user", user_commands())
@@ -321,8 +376,16 @@ fn main() {
        .completion_cb("local-store", config::datastore::complete_datastore_name)
        .completion_cb("remote", config::remote::complete_remote_name)
        .completion_cb("remote-store", complete_remote_datastore_name)
    )
    .insert(
        "verify",
        CliCommand::new(&API_METHOD_VERIFY)
            .arg_param(&["store"])
            .completion_cb("store", config::datastore::complete_datastore_name)
    );


    let mut rpcenv = CliEnvironment::new();
    rpcenv.set_user(Some(String::from("root@pam")));
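With the subcommand registered, an administrator can trigger and follow a verification straight from the shell, e.g. `proxmox-backup-manager verify store1`; the command posts to the datastore's `verify` API endpoint and then tails the resulting task through `view_task_result`.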
@ -1,5 +1,4 @@
|
||||
use std::sync::Arc;
|
||||
use std::ffi::OsString;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
@ -9,17 +8,18 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
|
||||
|
||||
use proxmox::try_block;
|
||||
use proxmox::api::RpcEnvironmentType;
|
||||
use proxmox::sys::linux::procfs::mountinfo::{Device, MountInfo};
|
||||
|
||||
use proxmox_backup::configdir;
|
||||
use proxmox_backup::buildcfg;
|
||||
use proxmox_backup::server;
|
||||
use proxmox_backup::tools::daemon;
|
||||
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
|
||||
use proxmox_backup::server::{ApiConfig, rest::*};
|
||||
use proxmox_backup::auth_helpers::*;
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs::zfs_pool_stats };
|
||||
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
|
||||
|
||||
fn main() {
|
||||
proxmox_backup::tools::setup_safe_path_env();
|
||||
|
||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
||||
eprintln!("Error: {}", err);
|
||||
std::process::exit(-1);
|
||||
@ -136,10 +136,10 @@ fn start_task_scheduler() {
|
||||
tokio::spawn(task.map(|_| ()));
|
||||
}
|
||||
|
||||
use std::time:: {Instant, Duration, SystemTime, UNIX_EPOCH};
|
||||
use std::time:: {Instant, Duration};
|
||||
|
||||
fn next_minute() -> Result<Instant, Error> {
|
||||
let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
|
||||
let epoch_now = epoch_now()?;
|
||||
let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
|
||||
Ok(Instant::now() + epoch_next - epoch_now)
|
||||
}
@ -298,8 +298,9 @@ async fn schedule_datastore_garbage_collection() {
continue;
}
};

let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
let now = match epoch_now_u64() {
Ok(epoch_now) => epoch_now as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
@ -385,12 +386,15 @@ async fn schedule_datastore_prune() {
}
};

//fixme: if last_prune_job_stzill_running { continue; }

let worker_type = "prune";

let last = match lookup_last_worker(worker_type, &store) {
Ok(Some(upid)) => upid.starttime,
Ok(Some(upid)) => {
if proxmox_backup::server::worker_is_active_local(&upid) {
continue;
}
upid.starttime
}
Ok(None) => 0,
Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err);
@ -406,8 +410,8 @@ async fn schedule_datastore_prune() {
}
};

let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
let now = match epoch_now_u64() {
Ok(epoch_now) => epoch_now as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
@ -507,12 +511,15 @@ async fn schedule_datastore_sync_jobs() {
}
};

//fixme: if last_sync_job_still_running { continue; }
let worker_type = "syncjob";

let worker_type = "sync";

let last = match lookup_last_worker(worker_type, &job_config.store) {
Ok(Some(upid)) => upid.starttime,
let last = match lookup_last_worker(worker_type, &job_id) {
Ok(Some(upid)) => {
if proxmox_backup::server::worker_is_active_local(&upid) {
continue;
}
upid.starttime
},
Ok(None) => 0,
Err(err) => {
eprintln!("lookup_last_job_start failed: {}", err);
@ -528,8 +535,8 @@ async fn schedule_datastore_sync_jobs() {
}
};

let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(epoch_now) => epoch_now.as_secs() as i64,
let now = match epoch_now_u64() {
Ok(epoch_now) => epoch_now as i64,
Err(err) => {
eprintln!("query system time failed - {}", err);
continue;
@ -594,31 +601,36 @@ async fn schedule_datastore_sync_jobs() {

async fn run_stat_generator() {

let mut count = 0;
loop {
count += 1;
let save = if count >= 6 { count = 0; true } else { false };

let delay_target = Instant::now() + Duration::from_secs(10);

generate_host_stats().await;
generate_host_stats(save).await;

tokio::time::delay_until(tokio::time::Instant::from_std(delay_target)).await;
}

}

}
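The new save flag threads a persistence hint through the stat generator: stats are gathered every ten seconds, but only every sixth pass (roughly once a minute) asks the RRD layer to flush to disk. A blocking sketch of that cadence, with collect standing in for generate_host_stats:

    use std::time::{Duration, Instant};

    // Sketch: call `collect` every 10 seconds, requesting a flush on every 6th pass.
    fn stat_loop_sketch(mut collect: impl FnMut(bool)) {
        let mut count = 0;
        loop {
            count += 1;
            let save = if count >= 6 { count = 0; true } else { false };
            let delay_target = Instant::now() + Duration::from_secs(10);
            collect(save);
            // Blocking stand-in for the tokio delay_until used above.
            if let Some(rest) = delay_target.checked_duration_since(Instant::now()) {
                std::thread::sleep(rest);
            }
        }
    }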

fn rrd_update_gauge(name: &str, value: f64) {
fn rrd_update_gauge(name: &str, value: f64, save: bool) {
use proxmox_backup::rrd;
if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge) {
if let Err(err) = rrd::update_value(name, value, rrd::DST::Gauge, save) {
eprintln!("rrd::update_value '{}' failed - {}", name, err);
}
}

fn rrd_update_derive(name: &str, value: f64) {
fn rrd_update_derive(name: &str, value: f64, save: bool) {
use proxmox_backup::rrd;
if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive) {
if let Err(err) = rrd::update_value(name, value, rrd::DST::Derive, save) {
eprintln!("rrd::update_value '{}' failed - {}", name, err);
}
}

async fn generate_host_stats() {
async fn generate_host_stats(save: bool) {
use proxmox::sys::linux::procfs::{
read_meminfo, read_proc_stat, read_proc_net_dev, read_loadavg};
use proxmox_backup::config::datastore;
@ -628,8 +640,8 @@ async fn generate_host_stats() {

match read_proc_stat() {
Ok(stat) => {
rrd_update_gauge("host/cpu", stat.cpu);
rrd_update_gauge("host/iowait", stat.iowait_percent);
rrd_update_gauge("host/cpu", stat.cpu, save);
rrd_update_gauge("host/iowait", stat.iowait_percent, save);
}
Err(err) => {
eprintln!("read_proc_stat failed - {}", err);
@ -638,10 +650,10 @@ async fn generate_host_stats() {

match read_meminfo() {
Ok(meminfo) => {
rrd_update_gauge("host/memtotal", meminfo.memtotal as f64);
rrd_update_gauge("host/memused", meminfo.memused as f64);
rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64);
rrd_update_gauge("host/swapused", meminfo.swapused as f64);
rrd_update_gauge("host/memtotal", meminfo.memtotal as f64, save);
rrd_update_gauge("host/memused", meminfo.memused as f64, save);
rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64, save);
rrd_update_gauge("host/swapused", meminfo.swapused as f64, save);
}
Err(err) => {
eprintln!("read_meminfo failed - {}", err);
@ -658,8 +670,8 @@ async fn generate_host_stats() {
netin += item.receive;
netout += item.send;
}
rrd_update_derive("host/netin", netin as f64);
rrd_update_derive("host/netout", netout as f64);
rrd_update_derive("host/netin", netin as f64, save);
rrd_update_derive("host/netout", netout as f64, save);
}
Err(err) => {
eprintln!("read_proc_net_dev failed - {}", err);
@ -668,7 +680,7 @@ async fn generate_host_stats() {

match read_loadavg() {
Ok(loadavg) => {
rrd_update_gauge("host/loadavg", loadavg.0 as f64);
rrd_update_gauge("host/loadavg", loadavg.0 as f64, save);
}
Err(err) => {
eprintln!("read_loadavg failed - {}", err);
@ -677,7 +689,7 @@ async fn generate_host_stats() {

let disk_manager = DiskManage::new();

gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
gather_disk_stats(disk_manager.clone(), Path::new("/"), "host", save);

match datastore::config() {
Ok((config, _)) => {
@ -688,7 +700,7 @@ async fn generate_host_stats() {

let rrd_prefix = format!("datastore/{}", config.name);
let path = std::path::Path::new(&config.path);
gather_disk_stats(disk_manager.clone(), path, &rrd_prefix);
gather_disk_stats(disk_manager.clone(), path, &rrd_prefix, save);
}
}
Err(err) => {
@ -699,100 +711,59 @@ async fn generate_host_stats() {
});
}

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str) {

match disk_usage(path) {
Ok((total, used, _avail)) => {
match proxmox_backup::tools::disks::disk_usage(path) {
Ok(status) => {
let rrd_key = format!("{}/total", rrd_prefix);
rrd_update_gauge(&rrd_key, total as f64);
rrd_update_gauge(&rrd_key, status.total as f64, save);
let rrd_key = format!("{}/used", rrd_prefix);
rrd_update_gauge(&rrd_key, used as f64);
rrd_update_gauge(&rrd_key, status.used as f64, save);
}
Err(err) => {
eprintln!("read disk_usage on {:?} failed - {}", path, err);
}
}

match disk_manager.mount_info() {
Ok(mountinfo) => {
if let Some((fs_type, device, source)) = find_mounted_device(mountinfo, path) {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}
}
}
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
}
match disk_manager.find_mounted_device(path) {
Ok(None) => {},
Ok(Some((fs_type, device, source))) => {
let mut device_stat = None;
match fs_type.as_str() {
"zfs" => {
if let Some(pool) = source {
match zfs_pool_stats(&pool) {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("zfs_pool_stats({:?}) failed - {}", pool, err),
}
}
}
if let Some(stat) = device_stat {
let rrd_key = format!("{}/read_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.read_ios as f64);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64);
let rrd_key = format!("{}/read_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_ticks as f64)/1000.0);
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
}
}
}
}
if let Some(stat) = device_stat {
let rrd_key = format!("{}/read_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.read_ios as f64, save);
let rrd_key = format!("{}/read_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.read_sectors*512) as f64, save);

let rrd_key = format!("{}/write_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.write_ios as f64);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64);
let rrd_key = format!("{}/write_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_ticks as f64)/1000.0);
}
let rrd_key = format!("{}/write_ios", rrd_prefix);
rrd_update_derive(&rrd_key, stat.write_ios as f64, save);
let rrd_key = format!("{}/write_bytes", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.write_sectors*512) as f64, save);

let rrd_key = format!("{}/io_ticks", rrd_prefix);
rrd_update_derive(&rrd_key, (stat.io_ticks as f64)/1000.0, save);
}
}
Err(err) => {
eprintln!("disk_manager mount_info() failed - {}", err);
eprintln!("find_mounted_device failed - {}", err);
}
}
}

// Returns (total, used, avail)
fn disk_usage(path: &std::path::Path) -> Result<(u64, u64, u64), Error> {

let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

use nix::NixPath;

let res = path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
nix::errno::Errno::result(res)?;

let bsize = stat.f_bsize as u64;

Ok((stat.f_blocks*bsize, (stat.f_blocks-stat.f_bfree)*bsize, stat.f_bavail*bsize))
}
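The removed disk_usage helper derives all three values from statfs block counts; f_bfree counts blocks free for root while f_bavail counts blocks free for unprivileged users, which is why used + avail need not equal total. A hypothetical pure-arithmetic sketch of that derivation:

    // Sketch: derive (total, used, avail) bytes from statfs-style block counts.
    // `bfree` is blocks free for root, `bavail` blocks free for ordinary users.
    fn usage_from_blocks(blocks: u64, bfree: u64, bavail: u64, bsize: u64) -> (u64, u64, u64) {
        let total = blocks * bsize;
        let used = (blocks - bfree) * bsize;
        let avail = bavail * bsize;
        (total, used, avail)
    }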

// Returns (fs_type, device, mount_source)
pub fn find_mounted_device(
mountinfo: &MountInfo,
path: &std::path::Path,
) -> Option<(String, Device, Option<OsString>)> {

let mut result = None;
let mut match_len = 0;

let root_path = std::path::Path::new("/");
for (_id, entry) in mountinfo {
if entry.root == root_path && path.starts_with(&entry.mount_point) {
let len = entry.mount_point.as_path().as_os_str().len();
if len > match_len {
match_len = len;
result = Some((entry.fs_type.clone(), entry.device, entry.mount_source.clone()));
}
}
}

result
}
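find_mounted_device keeps the entry whose mount point is the longest prefix of the queried path, i.e. the most specific mount. A minimal sketch of that longest-prefix selection over plain paths:

    use std::path::{Path, PathBuf};

    // Sketch: return the most specific mount point containing `path`.
    fn longest_prefix_mount<'a>(mounts: &'a [PathBuf], path: &Path) -> Option<&'a PathBuf> {
        mounts
            .iter()
            .filter(|mp| path.starts_with(mp))
            .max_by_key(|mp| mp.as_os_str().len())
    }

For mounts /, /mnt and /mnt/datastore, a query for /mnt/datastore/store1 picks /mnt/datastore.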

src/bin/proxmox_backup_client/benchmark.rs (new file, 82 lines)
@ -0,0 +1,82 @@
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::api;

use proxmox_backup::backup::{
load_and_decrypt_key,
CryptConfig,
};

use proxmox_backup::client::*;

use crate::{
KEYFILE_SCHEMA, REPO_URL_SCHEMA,
extract_repository_from_value,
get_encryption_key_password,
record_repository,
connect,
};

#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
}
}
)]
/// Run benchmark tests
pub async fn benchmark(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

let repo = extract_repository_from_value(&param)?;

let keyfile = param["keyfile"].as_str().map(PathBuf::from);

let crypt_config = match keyfile {
None => None,
Some(path) => {
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
};

let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

let client = connect(repo.host(), repo.user())?;
record_repository(&repo);

let client = BackupWriter::start(
client,
crypt_config.clone(),
repo.store(),
"host",
"benchmark",
backup_time,
false,
).await?;

println!("Start upload speed test");
let speed = client.upload_speedtest().await?;

println!("Upload speed: {} MiB/s", speed);

Ok(())
}
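One detail worth noting in benchmark(): backup_time is built as Utc.timestamp(Utc::now().timestamp(), 0), which round-trips the current time through whole epoch seconds because snapshot times are second-granular. A small sketch of that truncation, assuming the same chrono version:

    use chrono::{DateTime, TimeZone, Utc};

    // Sketch: truncate the current time to whole seconds, as backup_time does above.
    fn second_granular_now() -> DateTime<Utc> {
        Utc.timestamp(Utc::now().timestamp(), 0)
    }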

src/bin/proxmox_backup_client/mod.rs (new file, 6 lines)
@ -0,0 +1,6 @@
mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;

src/bin/proxmox_backup_client/mount.rs (new file, 196 lines)
@ -0,0 +1,196 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::os::unix::io::RawFd;
use std::path::Path;
use std::ffi::OsStr;

use anyhow::{bail, format_err, Error};
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};
use nix::unistd::{fork, ForkResult, pipe};
use futures::select;
use futures::future::FutureExt;

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};

use proxmox_backup::tools;
use proxmox_backup::backup::{
load_and_decrypt_key,
CryptConfig,
IndexFile,
BackupDir,
BackupGroup,
BufferedDynamicReader,
};

use proxmox_backup::client::*;

use crate::{
REPO_URL_SCHEMA,
extract_repository_from_value,
get_encryption_key_password,
complete_pxar_archive_name,
complete_group_or_snapshot,
complete_repository,
record_repository,
connect,
api_datastore_latest_snapshot,
BufferedDynamicReadAt,
};

#[sortable]
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&mount),
&ObjectSchema::new(
"Mount pxar archive.",
&sorted!([
("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
("target", false, &StringSchema::new("Target directory path.").schema()),
("repository", true, &REPO_URL_SCHEMA),
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
]),
)
);

pub fn mount_cmd_def() -> CliCommand {

CliCommand::new(&API_METHOD_MOUNT)
.arg_param(&["snapshot", "archive-name", "target"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_pxar_archive_name)
.completion_cb("target", tools::complete_file_name)
}

fn mount(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

let verbose = param["verbose"].as_bool().unwrap_or(false);
if verbose {
// This will stay in foreground with debug output enabled as None is
// passed for the RawFd.
return proxmox_backup::tools::runtime::main(mount_do(param, None));
}

// Process should be daemonized.
// Make sure to fork before the async runtime is instantiated to avoid troubles.
let pipe = pipe()?;
match fork() {
Ok(ForkResult::Parent { .. }) => {
nix::unistd::close(pipe.1).unwrap();
// Blocks the parent process until we are ready to go in the child
let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
Ok(Value::Null)
}
Ok(ForkResult::Child) => {
nix::unistd::close(pipe.0).unwrap();
nix::unistd::setsid().unwrap();
proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
}
Err(_) => bail!("failed to daemonize process"),
}
}
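mount() forks before any tokio runtime exists and uses the pipe purely as a one-byte handshake: the parent blocks in read until the child has finished the FUSE setup and writes a byte. A reduced sketch of that handshake, assuming the nix crate (setup is a hypothetical stand-in for the mount work; newer nix versions mark fork as unsafe):

    use anyhow::{bail, Error};
    use nix::unistd::{close, fork, pipe, read, write, ForkResult};

    // Sketch: daemonize with a one-byte pipe handshake before heavy setup.
    fn daemonize_with_handshake(setup: impl FnOnce() -> Result<(), Error>) -> Result<(), Error> {
        let (rx, tx) = pipe()?; // (read end, write end)
        match unsafe { fork() } {
            Ok(ForkResult::Parent { .. }) => {
                close(tx)?;
                let _ = read(rx, &mut [0u8])?; // block until the child is ready
                Ok(())
            }
            Ok(ForkResult::Child) => {
                close(rx)?;
                setup()?;           // e.g. perform the FUSE mount
                write(tx, &[0u8])?; // unblock the parent
                close(tx)?;
                Ok(())
            }
            Err(_) => bail!("failed to daemonize process"),
        }
    }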

async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let archive_name = tools::required_string_param(&param, "archive-name")?;
let target = tools::required_string_param(&param, "target")?;
let client = connect(repo.host(), repo.user())?;

record_repository(&repo);

let path = tools::required_string_param(&param, "snapshot")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group: BackupGroup = path.parse()?;
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
};

let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {
None => None,
Some(path) => {
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};

let server_archive_name = if archive_name.ends_with(".pxar") {
format!("{}.didx", archive_name)
} else {
bail!("Can only mount pxar archives.");
};

let client = BackupReader::start(
client,
crypt_config.clone(),
repo.store(),
&backup_type,
&backup_id,
backup_time,
true,
).await?;

let manifest = client.download_manifest().await?;

if server_archive_name.ends_with(".didx") {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader: proxmox_backup::pxar::fuse::Reader =
Arc::new(BufferedDynamicReadAt::new(reader));
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
let options = OsStr::new("ro,default_permissions");

let session = proxmox_backup::pxar::fuse::Session::mount(
decoder,
&options,
false,
Path::new(target),
)
.map_err(|err| format_err!("pxar mount failed: {}", err))?;

if let Some(pipe) = pipe {
nix::unistd::chdir(Path::new("/")).unwrap();
// Finish creation of daemon by redirecting filedescriptors.
let nullfd = nix::fcntl::open(
"/dev/null",
nix::fcntl::OFlag::O_RDWR,
nix::sys::stat::Mode::empty(),
).unwrap();
nix::unistd::dup2(nullfd, 0).unwrap();
nix::unistd::dup2(nullfd, 1).unwrap();
nix::unistd::dup2(nullfd, 2).unwrap();
if nullfd > 2 {
nix::unistd::close(nullfd).unwrap();
}
// Signal the parent process that we are done with the setup and it can
// terminate.
nix::unistd::write(pipe, &[0u8])?;
nix::unistd::close(pipe).unwrap();
}

let mut interrupt = signal(SignalKind::interrupt())?;
select! {
res = session.fuse() => res?,
_ = interrupt.recv().fuse() => {
// exit on interrupted
}
}
} else {
bail!("unknown archive file extension (expected .pxar)");
}

Ok(Value::Null)
}

src/bin/proxmox_backup_client/task.rs (new file, 148 lines)
@ -0,0 +1,148 @@
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, cli::*};

use proxmox_backup::tools;

use proxmox_backup::client::*;
use proxmox_backup::api2::types::UPID_SCHEMA;

use crate::{
REPO_URL_SCHEMA,
extract_repository_from_value,
complete_repository,
connect,
};

#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
limit: {
description: "The maximal number of tasks to list.",
type: Integer,
optional: true,
minimum: 1,
maximum: 1000,
default: 50,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
all: {
type: Boolean,
description: "Also list stopped tasks.",
optional: true,
},
}
}
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {

let output_format = get_output_format(&param);

let repo = extract_repository_from_value(&param)?;
let client = connect(repo.host(), repo.user())?;

let limit = param["limit"].as_u64().unwrap_or(50) as usize;
let running = !param["all"].as_bool().unwrap_or(false);

let args = json!({
"running": running,
"start": 0,
"limit": limit,
"userfilter": repo.user(),
"store": repo.store(),
});

let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
let mut data = result["data"].take();

let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

let options = default_table_format_options()
.column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
.column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
.column(ColumnConfig::new("upid"))
.column(ColumnConfig::new("status").renderer(tools::format::render_task_status));

format_and_print_result_full(&mut data, schema, &output_format, &options);

Ok(Value::Null)
}
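task_list simply inverts the all flag into the API's running filter and pages from offset 0; the rest is delegated to the generic node task endpoint. A sketch of just that argument construction with serde_json:

    use serde_json::{json, Value};

    // Sketch: build the task-list filter the way task_list does above.
    fn task_list_args(all: bool, limit: u64, user: &str, store: &str) -> Value {
        json!({
            "running": !all, // `all` includes stopped tasks, so `running` is its inverse
            "start": 0,
            "limit": limit,
            "userfilter": user,
            "store": store,
        })
    }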

#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
upid: {
schema: UPID_SCHEMA,
},
}
}
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {

let repo = extract_repository_from_value(&param)?;
let upid = tools::required_string_param(&param, "upid")?;

let client = connect(repo.host(), repo.user())?;

display_task_log(client, upid, true).await?;

Ok(Value::Null)
}

#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
upid: {
schema: UPID_SCHEMA,
},
}
}
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {

let repo = extract_repository_from_value(&param)?;
let upid_str = tools::required_string_param(&param, "upid")?;

let mut client = connect(repo.host(), repo.user())?;

let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
let _ = client.delete(&path, None).await?;

Ok(Value::Null)
}

pub fn task_mgmt_cli() -> CliCommandMap {

let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
.completion_cb("repository", complete_repository);

let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
.arg_param(&["upid"]);

let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
.arg_param(&["upid"]);

CliCommandMap::new()
.insert("log", task_log_cmd_def)
.insert("list", task_list_cmd_def)
.insert("stop", task_stop_cmd_def)
}
@ -17,7 +17,7 @@ fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error
}

#[api]
/// Diplay node certificate information.
/// Display node certificate information.
fn cert_info() -> Result<(), Error> {

let cert_path = PathBuf::from(configdir!("/proxy.pem"));

@ -86,7 +86,7 @@ pub fn datastore_commands() -> CommandLineInterface {
.completion_cb("name", config::datastore::complete_datastore_name)
.completion_cb("gc-schedule", config::datastore::complete_calendar_event)
.completion_cb("prune-schedule", config::datastore::complete_calendar_event)
)
)
.insert("remove",
CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
.arg_param(&["name"])

src/bin/proxmox_backup_manager/disk.rs (new file, 353 lines)
@ -0,0 +1,353 @@
use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};

use proxmox_backup::tools::disks::{
FileSystemType,
SmartAttribute,
complete_disk_name,
};

use proxmox_backup::api2::node::disks::{
zfs::DISK_LIST_SCHEMA,
zfs::ZFS_ASHIFT_SCHEMA,
zfs::ZfsRaidLevel,
zfs::ZfsCompressionType,
};

use proxmox_backup::api2::{self, types::* };

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Local disk list.
fn list_disks(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

let output_format = get_output_format(&param);

param["node"] = "localhost".into();

let info = &api2::node::disks::API_METHOD_LIST_DISKS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let render_wearout = |value: &Value, _record: &Value| -> Result<String, Error> {
match value.as_f64() {
Some(value) => Ok(format!("{:.2} %", if value <= 100.0 { 100.0 - value } else { 0.0 })),
None => Ok(String::from("-")),
}
};

let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("used"))
.column(ColumnConfig::new("gpt"))
.column(ColumnConfig::new("disk-type"))
.column(ColumnConfig::new("size"))
.column(ColumnConfig::new("model"))
.column(ColumnConfig::new("wearout").renderer(render_wearout))
.column(ColumnConfig::new("status"))
;

format_and_print_result_full(&mut data, info.returns, &output_format, &options);

Ok(Value::Null)
}

#[api(
input: {
properties: {
disk: {
schema: BLOCKDEVICE_NAME_SCHEMA,
},
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
},
returns: {
description: "SMART attributes.",
type: Array,
items: {
type: SmartAttribute,
},
}
)]
/// Show SMART attributes.
fn smart_attributes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

let output_format = get_output_format(&param);

param["node"] = "localhost".into();

let info = &api2::node::disks::API_METHOD_SMART_STATUS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let mut data = data["attributes"].take();

let options = default_table_format_options();
format_and_print_result_full(&mut data, API_METHOD_SMART_ATTRIBUTES.returns, &output_format, &options);

Ok(Value::Null)
}

#[api(
input: {
properties: {
disk: {
schema: BLOCKDEVICE_NAME_SCHEMA,
},
uuid: {
description: "UUID for the GPT table.",
type: String,
optional: true,
max_length: 36,
},
},
},
)]
/// Initialize empty Disk with GPT
async fn initialize_disk(
mut param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

param["node"] = "localhost".into();

let info = &api2::node::disks::API_METHOD_INITIALIZE_DISK;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(Value::Null)
}

#[api(
input: {
properties: {
name: {
schema: DATASTORE_SCHEMA,
},
devices: {
schema: DISK_LIST_SCHEMA,
},
raidlevel: {
type: ZfsRaidLevel,
},
ashift: {
schema: ZFS_ASHIFT_SCHEMA,
optional: true,
},
compression: {
type: ZfsCompressionType,
optional: true,
},
"add-datastore": {
description: "Configure a datastore using the zpool.",
type: bool,
optional: true,
},
},
},
)]
/// create a zfs pool
async fn create_zpool(
mut param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

param["node"] = "localhost".into();

let info = &api2::node::disks::zfs::API_METHOD_CREATE_ZPOOL;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(Value::Null)
}

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// Local zfs pools.
fn list_zpools(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

let output_format = get_output_format(&param);

param["node"] = "localhost".into();

let info = &api2::node::disks::zfs::API_METHOD_LIST_ZPOOLS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let render_usage = |value: &Value, record: &Value| -> Result<String, Error> {
let value = value.as_u64().unwrap_or(0);
let size = match record["size"].as_u64() {
Some(size) => size,
None => bail!("missing size property"),
};
if size == 0 {
bail!("got zero size");
}
Ok(format!("{:.2} %", (value as f64)/(size as f64)))
};

let options = default_table_format_options()
.column(ColumnConfig::new("name"))
.column(ColumnConfig::new("size"))
.column(ColumnConfig::new("alloc").right_align(true).renderer(render_usage))
.column(ColumnConfig::new("health"));

format_and_print_result_full(&mut data, info.returns, &output_format, &options);

Ok(Value::Null)
}

pub fn zpool_commands() -> CommandLineInterface {

let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_ZPOOLS))
.insert("create",
CliCommand::new(&API_METHOD_CREATE_ZPOOL)
.arg_param(&["name"])
.completion_cb("devices", complete_disk_name) // fixme: complete the list
);

cmd_def.into()
}

#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
}
}
)]
/// List systemd datastore mount units.
fn list_datastore_mounts(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {

let output_format = get_output_format(&param);

param["node"] = "localhost".into();

let info = &api2::node::disks::directory::API_METHOD_LIST_DATASTORE_MOUNTS;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

let options = default_table_format_options()
.column(ColumnConfig::new("path"))
.column(ColumnConfig::new("device"))
.column(ColumnConfig::new("filesystem"))
.column(ColumnConfig::new("options"));

format_and_print_result_full(&mut data, info.returns, &output_format, &options);

Ok(Value::Null)
}

#[api(
input: {
properties: {
name: {
schema: DATASTORE_SCHEMA,
},
disk: {
schema: BLOCKDEVICE_NAME_SCHEMA,
},
"add-datastore": {
description: "Configure a datastore using the directory.",
type: bool,
optional: true,
},
filesystem: {
type: FileSystemType,
optional: true,
},
},
},
)]
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
async fn create_datastore_disk(
mut param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

param["node"] = "localhost".into();

let info = &api2::node::disks::directory::API_METHOD_CREATE_DATASTORE_DISK;
let result = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};

crate::wait_for_local_worker(result.as_str().unwrap()).await?;

Ok(Value::Null)
}

pub fn filesystem_commands() -> CommandLineInterface {

let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORE_MOUNTS))
.insert("create",
CliCommand::new(&API_METHOD_CREATE_DATASTORE_DISK)
.arg_param(&["name"])
.completion_cb("disk", complete_disk_name)
);

cmd_def.into()
}

pub fn disk_commands() -> CommandLineInterface {

let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_DISKS))
.insert("smart-attributes",
CliCommand::new(&API_METHOD_SMART_ATTRIBUTES)
.arg_param(&["disk"])
.completion_cb("disk", complete_disk_name)
)
.insert("fs", filesystem_commands())
.insert("zpool", zpool_commands())
.insert("initialize",
CliCommand::new(&API_METHOD_INITIALIZE_DISK)
.arg_param(&["disk"])
.completion_cb("disk", complete_disk_name)
);

cmd_def.into()
}
@ -14,3 +14,5 @@ mod sync;
pub use sync::*;
mod user;
pub use user::*;
mod disk;
pub use disk::*;

src/bin/pxar.rs (800 lines)
@ -1,191 +1,305 @@
extern crate proxmox_backup;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs::OpenOptions;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};

use anyhow::{format_err, Error};
use futures::future::FutureExt;
use futures::select;
use tokio::signal::unix::{signal, SignalKind};

use pathpatterns::{MatchEntry, MatchType, PatternFlag};

use proxmox::{sortable, identity};
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use proxmox::api::api;

use proxmox_backup::tools;

use serde_json::{Value};

use std::io::Write;
use std::path::{Path, PathBuf};
use std::fs::OpenOptions;
use std::ffi::OsStr;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::AsRawFd;
use std::collections::HashSet;

use proxmox_backup::pxar;

fn dump_archive_from_reader<R: std::io::Read>(
reader: &mut R,
feature_flags: u64,
verbose: bool,
) -> Result<(), Error> {
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);

let stdout = std::io::stdout();
let mut out = stdout.lock();

let mut path = PathBuf::new();
decoder.dump_entry(&mut path, verbose, &mut out)?;

Ok(())
}

fn dump_archive(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

let archive = tools::required_string_param(&param, "archive")?;
let verbose = param["verbose"].as_bool().unwrap_or(false);

let feature_flags = pxar::flags::DEFAULT;

if archive == "-" {
let stdin = std::io::stdin();
let mut reader = stdin.lock();
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
} else {
if verbose { println!("PXAR dump: {}", archive); }
let file = std::fs::File::open(archive)?;
let mut reader = std::io::BufReader::new(file);
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
}

Ok(Value::Null)
}
use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags};

fn extract_archive_from_reader<R: std::io::Read>(
reader: &mut R,
target: &str,
feature_flags: u64,
feature_flags: Flags,
allow_existing_dirs: bool,
verbose: bool,
pattern: Option<Vec<pxar::MatchPattern>>
match_list: &[MatchEntry],
) -> Result<(), Error> {
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
decoder.set_callback(move |path| {
if verbose {
println!("{:?}", path);
}
Ok(())
});
decoder.set_allow_existing_dirs(allow_existing_dirs);

let pattern = pattern.unwrap_or_else(Vec::new);
decoder.restore(Path::new(target), &pattern)?;

Ok(())
proxmox_backup::pxar::extract_archive(
pxar::decoder::Decoder::from_std(reader)?,
Path::new(target),
&match_list,
feature_flags,
allow_existing_dirs,
|path| {
if verbose {
println!("{:?}", path);
}
},
)
}

#[api(
input: {
properties: {
archive: {
description: "Archive name.",
},
pattern: {
description: "List of paths or pattern matching files to restore",
type: Array,
items: {
type: String,
description: "Path or pattern matching files to restore.",
},
optional: true,
},
target: {
description: "Target directory",
optional: true,
},
verbose: {
description: "Verbose output.",
optional: true,
default: false,
},
"no-xattrs": {
description: "Ignore extended file attributes.",
optional: true,
default: false,
},
"no-fcaps": {
description: "Ignore file capabilities.",
optional: true,
default: false,
},
"no-acls": {
description: "Ignore access control list entries.",
optional: true,
default: false,
},
"allow-existing-dirs": {
description: "Allows directories to already exist on restore.",
optional: true,
default: false,
},
"files-from": {
description: "File containing match pattern for files to restore.",
optional: true,
},
"no-device-nodes": {
description: "Ignore device nodes.",
optional: true,
default: false,
},
"no-fifos": {
description: "Ignore fifos.",
optional: true,
default: false,
},
"no-sockets": {
description: "Ignore sockets.",
optional: true,
default: false,
},
},
},
)]
/// Extract an archive.
fn extract_archive(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

let archive = tools::required_string_param(&param, "archive")?;
let target = param["target"].as_str().unwrap_or(".");
let verbose = param["verbose"].as_bool().unwrap_or(false);
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
let files_from = param["files-from"].as_str();
let empty = Vec::new();
let arg_pattern = param["pattern"].as_array().unwrap_or(&empty);

let mut feature_flags = pxar::flags::DEFAULT;
archive: String,
pattern: Option<Vec<String>>,
target: Option<String>,
verbose: bool,
no_xattrs: bool,
no_fcaps: bool,
no_acls: bool,
allow_existing_dirs: bool,
files_from: Option<String>,
no_device_nodes: bool,
no_fifos: bool,
no_sockets: bool,
) -> Result<(), Error> {
let mut feature_flags = Flags::DEFAULT;
if no_xattrs {
feature_flags ^= pxar::flags::WITH_XATTRS;
feature_flags ^= Flags::WITH_XATTRS;
}
if no_fcaps {
feature_flags ^= pxar::flags::WITH_FCAPS;
feature_flags ^= Flags::WITH_FCAPS;
}
if no_acls {
feature_flags ^= pxar::flags::WITH_ACL;
feature_flags ^= Flags::WITH_ACL;
}
if no_device_nodes {
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
feature_flags ^= Flags::WITH_DEVICE_NODES;
}
if no_fifos {
feature_flags ^= pxar::flags::WITH_FIFOS;
feature_flags ^= Flags::WITH_FIFOS;
}
if no_sockets {
feature_flags ^= pxar::flags::WITH_SOCKETS;
feature_flags ^= Flags::WITH_SOCKETS;
}

let mut pattern_list = Vec::new();
if let Some(filename) = files_from {
let dir = nix::dir::Dir::open("./", nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty())?;
if let Some((mut pattern, _, _)) = pxar::MatchPattern::from_file(dir.as_raw_fd(), filename)? {
pattern_list.append(&mut pattern);
let pattern = pattern.unwrap_or_else(Vec::new);
let target = target.as_ref().map_or_else(|| ".", String::as_str);

let mut match_list = Vec::new();
if let Some(filename) = &files_from {
for line in proxmox_backup::tools::file_get_non_comment_lines(filename)? {
let line = line
.map_err(|err| format_err!("error reading {}: {}", filename, err))?;
match_list.push(
MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Include)
.map_err(|err| format_err!("bad pattern in file '{}': {}", filename, err))?,
);
}
}

for s in arg_pattern {
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
let p = pxar::MatchPattern::from_line(l.as_bytes())?
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
pattern_list.push(p);
for entry in pattern {
match_list.push(
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include)
.map_err(|err| format_err!("error in pattern: {}", err))?,
);
}

let pattern = if pattern_list.is_empty() {
None
} else {
Some(pattern_list)
};

if archive == "-" {
let stdin = std::io::stdin();
let mut reader = stdin.lock();
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
extract_archive_from_reader(
&mut reader,
&target,
feature_flags,
allow_existing_dirs,
verbose,
&match_list,
)?;
} else {
if verbose { println!("PXAR extract: {}", archive); }
if verbose {
println!("PXAR extract: {}", archive);
}
let file = std::fs::File::open(archive)?;
let mut reader = std::io::BufReader::new(file);
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
extract_archive_from_reader(
&mut reader,
&target,
feature_flags,
allow_existing_dirs,
verbose,
&match_list,
)?;
}

Ok(Value::Null)
Ok(())
}
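All the no-* switches in both extract_archive and create_archive clear feature bits with ^=; since every listed bit is set in Flags::DEFAULT, XOR-assign turns it off. A minimal sketch of the same pattern using bitflags 1.x syntax (FeatureFlags is a hypothetical stand-in, not the crate's Flags type):

    use bitflags::bitflags;

    bitflags! {
        // Hypothetical stand-in for proxmox_backup::pxar::Flags.
        struct FeatureFlags: u64 {
            const WITH_XATTRS = 0b001;
            const WITH_FCAPS  = 0b010;
            const WITH_ACL    = 0b100;
            const DEFAULT = Self::WITH_XATTRS.bits | Self::WITH_FCAPS.bits | Self::WITH_ACL.bits;
        }
    }

    // XOR clears a bit that is known to be set in DEFAULT.
    fn effective_flags(no_xattrs: bool, no_fcaps: bool, no_acls: bool) -> FeatureFlags {
        let mut flags = FeatureFlags::DEFAULT;
        if no_xattrs { flags ^= FeatureFlags::WITH_XATTRS; }
        if no_fcaps  { flags ^= FeatureFlags::WITH_FCAPS;  }
        if no_acls   { flags ^= FeatureFlags::WITH_ACL;    }
        flags
    }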

#[api(
input: {
properties: {
archive: {
description: "Archive name.",
},
source: {
description: "Source directory.",
},
verbose: {
description: "Verbose output.",
optional: true,
default: false,
},
"no-xattrs": {
description: "Ignore extended file attributes.",
optional: true,
default: false,
},
"no-fcaps": {
description: "Ignore file capabilities.",
optional: true,
default: false,
},
"no-acls": {
description: "Ignore access control list entries.",
optional: true,
default: false,
},
"all-file-systems": {
description: "Include mounted subdirs.",
optional: true,
default: false,
},
"no-device-nodes": {
description: "Ignore device nodes.",
optional: true,
default: false,
},
"no-fifos": {
description: "Ignore fifos.",
optional: true,
default: false,
},
"no-sockets": {
description: "Ignore sockets.",
optional: true,
default: false,
},
exclude: {
description: "List of paths or pattern matching files to exclude.",
optional: true,
type: Array,
items: {
description: "Path or pattern matching files to restore",
type: String,
},
},
"entries-max": {
description: "Max number of entries loaded at once into memory",
optional: true,
default: ENCODER_MAX_ENTRIES as isize,
minimum: 0,
maximum: std::isize::MAX,
},
},
},
)]
/// Create a new .pxar archive.
fn create_archive(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
archive: String,
source: String,
verbose: bool,
no_xattrs: bool,
no_fcaps: bool,
no_acls: bool,
all_file_systems: bool,
no_device_nodes: bool,
no_fifos: bool,
no_sockets: bool,
exclude: Option<Vec<String>>,
entries_max: isize,
) -> Result<(), Error> {
let pattern_list = {
let input = exclude.unwrap_or_else(Vec::new);
let mut pattern_list = Vec::with_capacity(input.len());
for entry in input {
pattern_list.push(
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
.map_err(|err| format_err!("error in exclude pattern: {}", err))?,
);
}
pattern_list
};

let archive = tools::required_string_param(&param, "archive")?;
let source = tools::required_string_param(&param, "source")?;
let verbose = param["verbose"].as_bool().unwrap_or(false);
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
let empty = Vec::new();
let exclude_pattern = param["exclude"].as_array().unwrap_or(&empty);
let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);

let devices = if all_file_systems { None } else { Some(HashSet::new()) };
let device_set = if all_file_systems {
None
} else {
Some(HashSet::new())
};

let source = PathBuf::from(source);

let mut dir = nix::dir::Dir::open(
&source, nix::fcntl::OFlag::O_NOFOLLOW, nix::sys::stat::Mode::empty())?;
let dir = nix::dir::Dir::open(
&source,
nix::fcntl::OFlag::O_NOFOLLOW,
nix::sys::stat::Mode::empty(),
)?;

let file = OpenOptions::new()
.create_new(true)
@ -193,332 +307,150 @@ fn create_archive(
.mode(0o640)
.open(archive)?;

let mut writer = std::io::BufWriter::with_capacity(1024*1024, file);
let mut feature_flags = pxar::flags::DEFAULT;
let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
let mut feature_flags = Flags::DEFAULT;
if no_xattrs {
feature_flags ^= pxar::flags::WITH_XATTRS;
feature_flags ^= Flags::WITH_XATTRS;
}
if no_fcaps {
feature_flags ^= pxar::flags::WITH_FCAPS;
feature_flags ^= Flags::WITH_FCAPS;
}
if no_acls {
feature_flags ^= pxar::flags::WITH_ACL;
feature_flags ^= Flags::WITH_ACL;
}
if no_device_nodes {
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
feature_flags ^= Flags::WITH_DEVICE_NODES;
}
if no_fifos {
feature_flags ^= pxar::flags::WITH_FIFOS;
feature_flags ^= Flags::WITH_FIFOS;
}
if no_sockets {
feature_flags ^= pxar::flags::WITH_SOCKETS;
feature_flags ^= Flags::WITH_SOCKETS;
}

let mut pattern_list = Vec::new();
for s in exclude_pattern {
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
let p = pxar::MatchPattern::from_line(l.as_bytes())?
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
pattern_list.push(p);
}

let catalog = None::<&mut pxar::catalog::DummyCatalogWriter>;
pxar::Encoder::encode(
source,
&mut dir,
&mut writer,
catalog,
devices,
verbose,
false,
feature_flags,
let writer = pxar::encoder::sync::StandardWriter::new(writer);
proxmox_backup::pxar::create_archive(
dir,
writer,
pattern_list,
feature_flags,
device_set,
false,
|path| {
if verbose {
println!("{:?}", path);
}
Ok(())
},
entries_max as usize,
None,
)?;

writer.flush()?;

Ok(Value::Null)
Ok(())
}
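create_archive now parses every exclude argument into a MatchEntry with MatchType::Exclude before the encoder starts, so a malformed pattern aborts the run up front instead of mid-archive. A sketch of that parse step, assuming the pathpatterns crate as imported above:

    use anyhow::{format_err, Error};
    use pathpatterns::{MatchEntry, MatchType, PatternFlag};

    // Sketch: turn CLI exclude strings into a match list, failing fast on bad patterns.
    fn parse_excludes(input: Vec<String>) -> Result<Vec<MatchEntry>, Error> {
        let mut list = Vec::with_capacity(input.len());
        for entry in input {
            list.push(
                MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                    .map_err(|err| format_err!("error in exclude pattern: {}", err))?,
            );
        }
        Ok(list)
    }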

#[api(
input: {
properties: {
archive: { description: "Archive name." },
mountpoint: { description: "Mountpoint for the file system." },
verbose: {
description: "Verbose output, running in the foreground (for debugging).",
optional: true,
default: false,
},
},
},
)]
/// Mount the archive to the provided mountpoint via FUSE.
fn mount_archive(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let archive = tools::required_string_param(&param, "archive")?;
let mountpoint = tools::required_string_param(&param, "mountpoint")?;
let verbose = param["verbose"].as_bool().unwrap_or(false);
let no_mt = param["no-mt"].as_bool().unwrap_or(false);

let archive = Path::new(archive);
let mountpoint = Path::new(mountpoint);
async fn mount_archive(
archive: String,
mountpoint: String,
verbose: bool,
) -> Result<(), Error> {
let archive = Path::new(&archive);
let mountpoint = Path::new(&mountpoint);
let options = OsStr::new("ro,default_permissions");
let mut session = pxar::fuse::Session::from_path(&archive, &options, verbose)
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
// Mount the session and daemonize if verbose is not set
session.mount(&mountpoint, !verbose)?;
session.run_loop(!no_mt)?;

Ok(Value::Null)
let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
.await
.map_err(|err| format_err!("pxar mount failed: {}", err))?;

let mut interrupt = signal(SignalKind::interrupt())?;

select! {
res = session.fuse() => res?,
_ = interrupt.recv().fuse() => {
if verbose {
eprintln!("interrupted");
}
}
}

Ok(())
}

#[sortable]
const API_METHOD_CREATE_ARCHIVE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&create_archive),
&ObjectSchema::new(
"Create new .pxar archive.",
&sorted!([
(
"archive",
false,
&StringSchema::new("Archive name").schema()
),
(
"source",
false,
&StringSchema::new("Source directory.").schema()
),
(
"verbose",
true,
&BooleanSchema::new("Verbose output.")
.default(false)
.schema()
),
(
"no-xattrs",
true,
&BooleanSchema::new("Ignore extended file attributes.")
.default(false)
.schema()
),
(
"no-fcaps",
true,
&BooleanSchema::new("Ignore file capabilities.")
.default(false)
.schema()
),
(
"no-acls",
true,
&BooleanSchema::new("Ignore access control list entries.")
.default(false)
.schema()
),
(
"all-file-systems",
true,
&BooleanSchema::new("Include mounted subdirs.")
.default(false)
.schema()
),
(
"no-device-nodes",
true,
&BooleanSchema::new("Ignore device nodes.")
.default(false)
.schema()
),
(
"no-fifos",
true,
&BooleanSchema::new("Ignore fifos.")
.default(false)
.schema()
),
(
"no-sockets",
true,
&BooleanSchema::new("Ignore sockets.")
.default(false)
.schema()
),
(
"exclude",
true,
&ArraySchema::new(
"List of paths or pattern matching files to exclude.",
&StringSchema::new("Path or pattern matching files to restore.").schema()
).schema()
),
(
"entries-max",
true,
&IntegerSchema::new("Max number of entries loaded at once into memory")
.default(pxar::ENCODER_MAX_ENTRIES as isize)
.minimum(0)
.maximum(std::isize::MAX)
.schema()
),
]),
)
);
#[api(
input: {
properties: {
archive: {
description: "Archive name.",
},
verbose: {
description: "Verbose output.",
optional: true,
default: false,
},
},
},
)]
/// List the contents of an archive.
fn dump_archive(archive: String, verbose: bool) -> Result<(), Error> {
for entry in pxar::decoder::Decoder::open(archive)? {
let entry = entry?;

#[sortable]
const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&extract_archive),
&ObjectSchema::new(
"Extract an archive.",
&sorted!([
(
"archive",
false,
&StringSchema::new("Archive name.").schema()
),
(
"pattern",
true,
&ArraySchema::new(
"List of paths or pattern matching files to restore",
&StringSchema::new("Path or pattern matching files to restore.").schema()
).schema()
),
(
"target",
true,
&StringSchema::new("Target directory.").schema()
),
(
"verbose",
true,
&BooleanSchema::new("Verbose output.")
.default(false)
.schema()
),
(
"no-xattrs",
true,
&BooleanSchema::new("Ignore extended file attributes.")
.default(false)
.schema()
),
(
"no-fcaps",
true,
&BooleanSchema::new("Ignore file capabilities.")
.default(false)
.schema()
),
(
"no-acls",
true,
&BooleanSchema::new("Ignore access control list entries.")
.default(false)
.schema()
),
(
"allow-existing-dirs",
true,
&BooleanSchema::new("Allows directories to already exist on restore.")
.default(false)
.schema()
),
(
"files-from",
true,
&StringSchema::new("Match pattern for files to restore.").schema()
),
(
"no-device-nodes",
true,
&BooleanSchema::new("Ignore device nodes.")
.default(false)
.schema()
),
(
"no-fifos",
true,
&BooleanSchema::new("Ignore fifos.")
.default(false)
.schema()
),
(
"no-sockets",
true,
&BooleanSchema::new("Ignore sockets.")
.default(false)
.schema()
),
]),
)
);

#[sortable]
const API_METHOD_MOUNT_ARCHIVE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&mount_archive),
&ObjectSchema::new(
"Mount the archive as filesystem via FUSE.",
&sorted!([
(
"archive",
false,
&StringSchema::new("Archive name.").schema()
),
(
"mountpoint",
false,
&StringSchema::new("Mountpoint for the filesystem root.").schema()
),
(
"verbose",
true,
&BooleanSchema::new("Verbose output, keeps process running in foreground (for debugging).")
.default(false)
.schema()
),
(
"no-mt",
true,
&BooleanSchema::new("Run in single threaded mode (for debugging).")
.default(false)
.schema()
),
]),
)
);

#[sortable]
const API_METHOD_DUMP_ARCHIVE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&dump_archive),
&ObjectSchema::new(
"List the contents of an archive.",
&sorted!([
( "archive", false, &StringSchema::new("Archive name.").schema()),
( "verbose", true, &BooleanSchema::new("Verbose output.")
.default(false)
.schema()
),
])
)
);
if verbose {
println!("{}", format_single_line_entry(&entry));
} else {
println!("{:?}", entry.path());
}
}
Ok(())
}

fn main() {

let cmd_def = CliCommandMap::new()
|
||||
.insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.arg_param(&["archive", "source"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name)
|
||||
.insert(
|
||||
"create",
|
||||
CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.arg_param(&["archive", "source"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name),
|
||||
)
|
||||
.insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.arg_param(&["archive", "target"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name)
|
||||
)
|
||||
.insert("mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.arg_param(&["archive", "mountpoint"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name)
|
||||
.insert(
|
||||
"extract",
|
||||
CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.arg_param(&["archive", "target"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name),
|
||||
)
|
||||
.insert("list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.arg_param(&["archive"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.insert(
|
||||
"mount",
|
||||
CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.arg_param(&["archive", "mountpoint"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name),
|
||||
)
|
||||
.insert(
|
||||
"list",
|
||||
CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.arg_param(&["archive"])
|
||||
.completion_cb("archive", tools::complete_file_name),
|
||||
);
|
||||
|
||||
let rpcenv = CliEnvironment::new();
|
||||
run_cli_command(cmd_def, rpcenv, None);
|
||||
run_cli_command(cmd_def, rpcenv, Some(|future| {
|
||||
proxmox_backup::tools::runtime::main(future)
|
||||
}));
|
||||
}
|
||||
|
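The new `mount_archive` body above replaces the blocking `run_loop` with a future raced against SIGINT. A minimal, self-contained sketch of that shutdown pattern, generic over the long-running future (`run_until_interrupted` is an illustrative name, not part of the crate; it assumes the same `anyhow`, `futures`, and tokio unix-signal APIs the code above uses):

```rust
use anyhow::Error;
use futures::future::FutureExt;
use futures::select;
use tokio::signal::unix::{signal, SignalKind};

/// Race `work` (e.g. the FUSE session future) against SIGINT;
/// whichever completes first decides the outcome, mirroring the
/// select! block in mount_archive above.
async fn run_until_interrupted(
    work: impl std::future::Future<Output = Result<(), Error>> + Unpin,
    verbose: bool,
) -> Result<(), Error> {
    let mut work = work.fuse();
    let mut interrupt = signal(SignalKind::interrupt())?;
    select! {
        res = work => res?,
        _ = interrupt.recv().fuse() => {
            if verbose {
                eprintln!("interrupted");
            }
        }
    }
    Ok(())
}
```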
@@ -3,11 +3,11 @@
//! This library implements the client side to access the backups
//! server using https.

pub mod pipe_to_stream;
mod merge_known_chunks;
pub mod pipe_to_stream;

mod http_client;
pub use http_client::*;
pub use http_client::*;

mod task_log;
pub use task_log::*;
@@ -24,10 +24,10 @@ pub use remote_chunk_reader::*;
mod pxar_backup_stream;
pub use pxar_backup_stream::*;

mod pxar_decode_writer;
pub use pxar_decode_writer::*;

mod backup_repo;
pub use backup_repo::*;

mod backup_specification;
pub use backup_specification::*;

pub mod pull;
@@ -91,7 +91,7 @@ impl BackupReader {
        &self,
        file_name: &str,
        output: W,
    ) -> Result<W, Error> {
    ) -> Result<(), Error> {
        let path = "download";
        let param = json!({ "file-name": file_name });
        self.h2.download(path, Some(param), output).await
@@ -103,7 +103,7 @@ impl BackupReader {
    pub async fn speedtest<W: Write + Send>(
        &self,
        output: W,
    ) -> Result<W, Error> {
    ) -> Result<(), Error> {
        self.h2.download("speedtest", None, output).await
    }

@@ -112,7 +112,7 @@ impl BackupReader {
        &self,
        digest: &[u8; 32],
        output: W,
    ) -> Result<W, Error> {
    ) -> Result<(), Error> {
        let path = "chunk";
        let param = json!({ "digest": digest_to_hex(digest) });
        self.h2.download(path, Some(param), output).await
@@ -127,7 +127,8 @@ impl BackupReader {

        use std::convert::TryFrom;

        let raw_data = self.download(MANIFEST_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
        let mut raw_data = Vec::with_capacity(64 * 1024);
        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
        let blob = DataBlob::from_raw(raw_data)?;
        blob.verify_crc()?;
        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
@@ -138,7 +139,7 @@ impl BackupReader {

    /// Download a .blob file
    ///
    /// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using
    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
    /// the provided manifest.
    pub async fn download_blob(
        &self,
@@ -146,13 +147,13 @@ impl BackupReader {
        name: &str,
    ) -> Result<DataBlobReader<File>, Error> {

        let tmpfile = std::fs::OpenOptions::new()
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let mut tmpfile = self.download(name, tmpfile).await?;
        self.download(name, &mut tmpfile).await?;

        let (csum, size) = compute_file_csum(&mut tmpfile)?;
        manifest.verify_file(name, &csum, size)?;
@@ -164,7 +165,7 @@ impl BackupReader {

    /// Download dynamic index file
    ///
    /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    pub async fn download_dynamic_index(
        &self,
@@ -172,13 +173,13 @@ impl BackupReader {
        name: &str,
    ) -> Result<DynamicIndexReader, Error> {

        let tmpfile = std::fs::OpenOptions::new()
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let tmpfile = self.download(name, tmpfile).await?;
        self.download(name, &mut tmpfile).await?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@@ -192,7 +193,7 @@ impl BackupReader {

    /// Download fixed index file
    ///
    /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    pub async fn download_fixed_index(
        &self,
@@ -200,13 +201,13 @@ impl BackupReader {
        name: &str,
    ) -> Result<FixedIndexReader, Error> {

        let tmpfile = std::fs::OpenOptions::new()
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let tmpfile = self.download(name, tmpfile).await?;
        self.download(name, &mut tmpfile).await?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
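The recurring change in the hunks above is a calling-convention switch: `download` no longer takes the writer by value and returns it in `Ok(W)`; it borrows `&mut W` and returns `Result<(), Error>`, so the caller keeps ownership throughout. A minimal sketch of the before/after caller pattern (the free `download` function here is a hypothetical stand-in for the `BackupReader`/`H2Client` methods):

```rust
use anyhow::Error;

// Stand-in for the HTTP/2 body copy performed by the real download methods.
async fn download(_name: &str, output: &mut Vec<u8>) -> Result<(), Error> {
    output.extend_from_slice(b"blob bytes");
    Ok(())
}

async fn caller() -> Result<(), Error> {
    // before: let raw_data = reader.download(name, Vec::new()).await?;
    // after: the buffer stays owned by the caller and is passed by &mut.
    let mut raw_data = Vec::with_capacity(64 * 1024);
    download("index.json.blob", &mut raw_data).await?;
    assert!(!raw_data.is_empty());
    Ok(())
}
```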
@@ -3,12 +3,8 @@ use std::fmt;
use anyhow::{format_err, Error};

use proxmox::api::schema::*;
use proxmox::const_regex;

const_regex! {
    /// Regular expression to parse repository URLs
    pub BACKUP_REPO_URL_REGEX = r"^(?:(?:([\w@]+)@)?([\w\-_.]+):)?(\w+)$";
}
use crate::api2::types::*;

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
src/client/backup_specification.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
use anyhow::{bail, Error};

use proxmox::api::schema::*;

proxmox::const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}

pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<label>:<path>]).")
    .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
    .schema();

pub enum BackupSpecificationType { PXAR, IMAGE, CONFIG, LOGFILE }

pub struct BackupSpecification {
    pub archive_name: String, // left part
    pub config_string: String, // right part
    pub spec_type: BackupSpecificationType,
}

pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Error> {

    if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
        let archive_name = caps.get(1).unwrap().as_str().into();
        let extension = caps.get(2).unwrap().as_str();
        let config_string = caps.get(3).unwrap().as_str().into();
        let spec_type = match extension {
            "pxar" => BackupSpecificationType::PXAR,
            "img" => BackupSpecificationType::IMAGE,
            "conf" => BackupSpecificationType::CONFIG,
            "log" => BackupSpecificationType::LOGFILE,
            _ => bail!("unknown backup source type '{}'", extension),
        };
        return Ok(BackupSpecification { archive_name, config_string, spec_type });
    }

    bail!("unable to parse backup source specification '{}'", value);
}
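For reference, a usage sketch of the parser above; the `demo` wrapper is illustrative, and the input follows the `<label>.<ext>:<path>` form matched by `BACKUPSPEC_REGEX`:

```rust
fn demo() -> Result<(), anyhow::Error> {
    // group 1 = "root.pxar", group 2 = "pxar", group 3 = "/"
    let spec = parse_backup_specification("root.pxar:/")?;
    assert_eq!(spec.archive_name, "root.pxar");
    assert_eq!(spec.config_string, "/");
    assert!(matches!(spec.spec_type, BackupSpecificationType::PXAR));
    Ok(())
}
```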
@@ -1,4 +1,5 @@
use std::collections::HashSet;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

@@ -22,6 +23,7 @@ pub struct BackupWriter {
    h2: H2Client,
    abort: AbortHandle,
    verbose: bool,
    crypt_config: Option<Arc<CryptConfig>>,
}

impl Drop for BackupWriter {
@@ -38,12 +40,13 @@ pub struct BackupStats {

impl BackupWriter {

    fn new(h2: H2Client, abort: AbortHandle, verbose: bool) -> Arc<Self> {
        Arc::new(Self { h2, abort, verbose })
    fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
        Arc::new(Self { h2, abort, crypt_config, verbose })
    }

    pub async fn start(
        client: HttpClient,
        crypt_config: Option<Arc<CryptConfig>>,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
@@ -64,7 +67,7 @@ impl BackupWriter {

        let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;

        Ok(BackupWriter::new(h2, abort, debug))
        Ok(BackupWriter::new(h2, abort, crypt_config, debug))
    }

    pub async fn get(
@@ -159,16 +162,19 @@ impl BackupWriter {
        &self,
        data: Vec<u8>,
        file_name: &str,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        sign_only: bool,
        crypt_or_sign: Option<bool>,
    ) -> Result<BackupStats, Error> {

        let blob = if let Some(ref crypt_config) = crypt_config {
            if sign_only {
                DataBlob::create_signed(&data, crypt_config, compress)?
        let blob = if let Some(ref crypt_config) = self.crypt_config {
            if let Some(encrypt) = crypt_or_sign {
                if encrypt {
                    DataBlob::encode(&data, Some(crypt_config), compress)?
                } else {
                    DataBlob::create_signed(&data, crypt_config, compress)?
                }
            } else {
                DataBlob::encode(&data, Some(crypt_config), compress)?
                DataBlob::encode(&data, None, compress)?
            }
        } else {
            DataBlob::encode(&data, None, compress)?
@@ -187,8 +193,8 @@ impl BackupWriter {
        &self,
        src_path: P,
        file_name: &str,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        crypt_or_sign: Option<bool>,
    ) -> Result<BackupStats, Error> {

        let src_path = src_path.as_ref();
@@ -203,25 +209,16 @@ impl BackupWriter {
            .await
            .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

        let blob = DataBlob::encode(&contents, crypt_config.as_ref().map(AsRef::as_ref), compress)?;
        let raw_data = blob.into_inner();
        let size = raw_data.len() as u64;
        let csum = openssl::sha::sha256(&raw_data);
        let param = json!({
            "encoded-size": size,
            "file-name": file_name,
        });
        self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
        Ok(BackupStats { size, csum })
        self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
    }

    pub async fn upload_stream(
        &self,
        previous_manifest: Option<Arc<BackupManifest>>,
        archive_name: &str,
        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
        prefix: &str,
        fixed_size: Option<u64>,
        crypt_config: Option<Arc<CryptConfig>>,
    ) -> Result<BackupStats, Error> {
        let known_chunks = Arc::new(Mutex::new(HashSet::new()));

@@ -233,7 +230,18 @@ impl BackupWriter {
        let index_path = format!("{}_index", prefix);
        let close_path = format!("{}_close", prefix);

        self.download_chunk_list(&index_path, archive_name, known_chunks.clone()).await?;
        if let Some(manifest) = previous_manifest {
            // try, but ignore errors
            match archive_type(archive_name) {
                Ok(ArchiveType::FixedIndex) => {
                    let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
                }
                Ok(ArchiveType::DynamicIndex) => {
                    let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
                }
                _ => { /* do nothing */ }
            }
        }

        let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

@@ -244,7 +252,7 @@ impl BackupWriter {
            stream,
            &prefix,
            known_chunks.clone(),
            crypt_config,
            self.crypt_config.clone(),
            self.verbose,
        )
        .await?;
@@ -374,41 +382,93 @@ impl BackupWriter {
        (verify_queue_tx, verify_result_rx)
    }

    pub async fn download_chunk_list(
    pub async fn download_previous_fixed_index(
        &self,
        path: &str,
        archive_name: &str,
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    ) -> Result<(), Error> {
    ) -> Result<FixedIndexReader, Error> {

        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let param = json!({ "archive-name": archive_name });
        let request = H2Client::request_builder("localhost", "GET", path, Some(param), None).unwrap();
        self.h2.download("previous", Some(param), &mut tmpfile).await?;

        let h2request = self.h2.send_request(request, None).await?;
        let resp = h2request.await?;
        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
        // Note: do not use values stored in index (not trusted) - instead, compute them again
        let (csum, size) = index.compute_csum();
        manifest.verify_file(archive_name, &csum, size)?;

        let status = resp.status();

        if !status.is_success() {
            H2Client::h2api_response(resp).await?; // raise error
            unreachable!();
        }

        let mut body = resp.into_body();
        let mut flow_control = body.flow_control().clone();

        let mut stream = DigestListDecoder::new(body.map_err(Error::from));

        while let Some(chunk) = stream.try_next().await? {
            let _ = flow_control.release_capacity(chunk.len());
            known_chunks.lock().unwrap().insert(chunk);
        // add index chunks to known chunks
        let mut known_chunks = known_chunks.lock().unwrap();
        for i in 0..index.index_count() {
            known_chunks.insert(*index.index_digest(i).unwrap());
        }

        if self.verbose {
            println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
            println!("{}: known chunks list length is {}", archive_name, index.index_count());
        }

        Ok(())
        Ok(index)
    }

    pub async fn download_previous_dynamic_index(
        &self,
        archive_name: &str,
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    ) -> Result<DynamicIndexReader, Error> {

        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;

        let param = json!({ "archive-name": archive_name });
        self.h2.download("previous", Some(param), &mut tmpfile).await?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
        // Note: do not use values stored in index (not trusted) - instead, compute them again
        let (csum, size) = index.compute_csum();
        manifest.verify_file(archive_name, &csum, size)?;

        // add index chunks to known chunks
        let mut known_chunks = known_chunks.lock().unwrap();
        for i in 0..index.index_count() {
            known_chunks.insert(*index.index_digest(i).unwrap());
        }

        if self.verbose {
            println!("{}: known chunks list length is {}", archive_name, index.index_count());
        }

        Ok(index)
    }

    /// Download backup manifest (index.json) of last backup
    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {

        use std::convert::TryFrom;

        let mut raw_data = Vec::with_capacity(64 * 1024);

        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
        self.h2.download("previous", Some(param), &mut raw_data).await?;

        let blob = DataBlob::from_raw(raw_data)?;
        blob.verify_crc()?;
        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
        let json: Value = serde_json::from_slice(&data[..])?;
        let manifest = BackupManifest::try_from(json)?;

        Ok(manifest)
    }

    fn upload_chunk_info_stream(
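The `sign_only: bool` flag is replaced above by a tri-state `crypt_or_sign: Option<bool>`, evaluated against the writer's stored `crypt_config`. A minimal decision-table sketch of that logic (names here are stand-ins, not the crate's API; the comments point at the corresponding `DataBlob` calls in the diff):

```rust
enum BlobMode { Encrypted, Signed, Plain }

fn blob_mode(have_crypt_config: bool, crypt_or_sign: Option<bool>) -> BlobMode {
    match (have_crypt_config, crypt_or_sign) {
        // DataBlob::encode(&data, Some(crypt_config), compress)
        (true, Some(true)) => BlobMode::Encrypted,
        // DataBlob::create_signed(&data, crypt_config, compress)
        (true, Some(false)) => BlobMode::Signed,
        // DataBlob::encode(&data, None, compress) - no config, or config
        // present but neither encryption nor signing requested
        _ => BlobMode::Plain,
    }
}
```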
@@ -343,7 +343,7 @@ impl HttpClient {

    /// Login
    ///
    /// Login is done on demand, so this is onyl required if you need
    /// Login is done on demand, so this is only required if you need
    /// access to authentication data in 'AuthInfo'.
    pub async fn login(&self) -> Result<AuthInfo, Error> {
        self.auth.listen().await
@@ -400,21 +400,22 @@ impl HttpClient {
        if interactive && tty::stdin_isatty() {
            println!("fingerprint: {}", fp_string);
            loop {
                print!("Want to trust? (y/n): ");
                print!("Are you sure you want to continue connecting? (y/n): ");
                let _ = std::io::stdout().flush();
                let mut buf = [0u8; 1];
                use std::io::Read;
                match std::io::stdin().read_exact(&mut buf) {
                    Ok(()) => {
                        if buf[0] == b'y' || buf[0] == b'Y' {
                use std::io::{BufRead, BufReader};
                let mut line = String::new();
                match BufReader::new(std::io::stdin()).read_line(&mut line) {
                    Ok(_) => {
                        let trimmed = line.trim();
                        if trimmed == "y" || trimmed == "Y" {
                            return (true, Some(fp_string));
                        } else if buf[0] == b'n' || buf[0] == b'N' {
                        } else if trimmed == "n" || trimmed == "N" {
                            return (false, None);
                        } else {
                            continue;
                        }
                    }
                    Err(_) => {
                        return (false, None);
                    }
                    Err(_) => return (false, None),
                }
            }
        }
@@ -465,7 +466,7 @@ impl HttpClient {
        &mut self,
        path: &str,
        output: &mut (dyn Write + Send),
    ) -> Result<(), Error> {
    ) -> Result<(), Error> {
        let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();

        let client = self.client.clone();
@@ -706,7 +707,7 @@ impl H2Client {
        path: &str,
        param: Option<Value>,
        mut output: W,
    ) -> Result<W, Error> {
    ) -> Result<(), Error> {
        let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();

        let response_future = self.send_request(request, None).await?;
@@ -726,7 +727,7 @@ impl H2Client {
            output.write_all(&chunk)?;
        }

        Ok(output)
        Ok(())
    }

    pub async fn upload(
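The fingerprint prompt above switches from reading a single raw byte to line-based input, which tolerates the trailing newline that `read_exact(&mut [0u8; 1])` used to leave in stdin's buffer. A self-contained version of the resulting confirmation loop (`confirm` is an illustrative helper, not the crate's function):

```rust
use std::io::{BufRead, BufReader, Write};

/// Prompt until the user answers y/Y or n/N; any I/O error counts as "no".
fn confirm(prompt: &str) -> bool {
    loop {
        print!("{} (y/n): ", prompt);
        let _ = std::io::stdout().flush();
        let mut line = String::new();
        match BufReader::new(std::io::stdin()).read_line(&mut line) {
            Ok(_) => match line.trim() {
                "y" | "Y" => return true,
                "n" | "N" => return false,
                _ => continue, // re-prompt on anything else
            },
            Err(_) => return false,
        }
    }
}
```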
@@ -34,7 +34,7 @@ async fn pull_index_chunks<I: IndexFile>(
            continue;
        }
        //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
        let chunk = chunk_reader.read_raw_chunk(&digest)?;
        let chunk = chunk_reader.read_raw_chunk(&digest).await?;

        target.insert_chunk(&chunk, &digest)?;
    }
@@ -47,13 +47,13 @@ async fn download_manifest(
    filename: &std::path::Path,
) -> Result<std::fs::File, Error> {

    let tmp_manifest_file = std::fs::OpenOptions::new()
    let mut tmp_manifest_file = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .read(true)
        .open(&filename)?;

    let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
    reader.download(MANIFEST_BLOB_NAME, &mut tmp_manifest_file).await?;

    tmp_manifest_file.seek(SeekFrom::Start(0))?;

@@ -77,13 +77,13 @@ async fn pull_single_archive(
    tmp_path.set_extension("tmp");

    worker.log(format!("sync archive {}", archive_name));
    let tmpfile = std::fs::OpenOptions::new()
    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .read(true)
        .open(&tmp_path)?;

    let tmpfile = reader.download(archive_name, tmpfile).await?;
    reader.download(archive_name, &mut tmpfile).await?;

    match archive_type(archive_name)? {
        ArchiveType::DynamicIndex => {
@@ -106,6 +106,34 @@ async fn pull_single_archive(
    Ok(())
}

// Note: The client.log.blob is uploaded after the backup, so it is
// not mentioned in the manifest.
async fn try_client_log_download(
    worker: &WorkerTask,
    reader: Arc<BackupReader>,
    path: &std::path::Path,
) -> Result<(), Error> {

    let mut tmp_path = path.to_owned();
    tmp_path.set_extension("tmp");

    let tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .read(true)
        .open(&tmp_path)?;

    // Note: be silent if there is no log - only log successful download
    if let Ok(()) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
        if let Err(err) = std::fs::rename(&tmp_path, &path) {
            bail!("Atomic rename file {:?} failed - {}", path, err);
        }
        worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME));
    }

    Ok(())
}

async fn pull_snapshot(
    worker: &WorkerTask,
    reader: Arc<BackupReader>,
@@ -117,6 +145,10 @@ async fn pull_snapshot(
    manifest_name.push(snapshot.relative_path());
    manifest_name.push(MANIFEST_BLOB_NAME);

    let mut client_log_name = tgt_store.base_path();
    client_log_name.push(snapshot.relative_path());
    client_log_name.push(CLIENT_LOG_BLOB_NAME);

    let mut tmp_manifest_name = manifest_name.clone();
    tmp_manifest_name.set_extension("tmp");

@@ -137,6 +169,10 @@ async fn pull_snapshot(
        })?;

        if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
            if !client_log_name.exists() {
                try_client_log_download(worker, reader, &client_log_name).await?;
            }
            worker.log("no data changes");
            return Ok(()); // nothing changed
        }
    }
@@ -199,6 +235,10 @@ async fn pull_snapshot(
        bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
    }

    if !client_log_name.exists() {
        try_client_log_download(worker, reader, &client_log_name).await?;
    }

    // cleanup - remove stale files
    tgt_store.cleanup_backup_dir(snapshot, &manifest)?;

@@ -223,9 +263,11 @@ pub async fn pull_snapshot_from(
        }
        return Err(err);
    }
    worker.log(format!("sync snapshot {:?} done", snapshot.relative_path()));
    } else {
        worker.log(format!("re-sync snapshot {:?}", snapshot.relative_path()));
        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?
        pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await?;
        worker.log(format!("re-sync snapshot {:?} done", snapshot.relative_path()));
    }

    Ok(())
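`try_client_log_download` above relies on a download-then-rename pattern: write into `<name>.tmp` and only `rename()` onto the final path once the download succeeded, so readers never observe a partially written file. A minimal sketch of just that commit step (`commit_tmp` is an illustrative name; the download itself is elided):

```rust
use std::path::Path;

/// Rename "<path>.tmp" onto `path` after a successful download; rename(2)
/// within one filesystem is atomic, so `path` is either absent or complete.
fn commit_tmp(path: &Path) -> Result<(), anyhow::Error> {
    let mut tmp_path = path.to_owned();
    tmp_path.set_extension("tmp");
    // ... download into tmp_path would happen here ...
    std::fs::rename(&tmp_path, path)
        .map_err(|err| anyhow::format_err!("Atomic rename file {:?} failed - {}", path, err))
}
```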
@@ -9,12 +9,12 @@ use std::thread;

use anyhow::{format_err, Error};
use futures::stream::Stream;

use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use nix::dir::Dir;

use crate::pxar;
use pathpatterns::MatchEntry;

use crate::backup::CatalogWriter;

/// Stream implementation to encode and upload .pxar archives.
@@ -29,7 +29,6 @@ pub struct PxarBackupStream {
}

impl Drop for PxarBackupStream {

    fn drop(&mut self) {
        self.rx = None;
        self.child.take().unwrap().join().unwrap();
@@ -37,46 +36,49 @@ impl Drop for PxarBackupStream {
}

impl PxarBackupStream {

    pub fn new<W: Write + Send + 'static>(
        mut dir: Dir,
        path: PathBuf,
        dir: Dir,
        _path: PathBuf,
        device_set: Option<HashSet<u64>>,
        verbose: bool,
        _verbose: bool,
        skip_lost_and_found: bool,
        catalog: Arc<Mutex<CatalogWriter<W>>>,
        exclude_pattern: Vec<pxar::MatchPattern>,
        patterns: Vec<MatchEntry>,
        entries_max: usize,
    ) -> Result<Self, Error> {

        let (tx, rx) = std::sync::mpsc::sync_channel(10);

        let buffer_size = 256*1024;
        let buffer_size = 256 * 1024;

        let error = Arc::new(Mutex::new(None));
        let error2 = error.clone();
        let child = std::thread::Builder::new()
            .name("PxarBackupStream".to_string())
            .spawn({
                let error = Arc::clone(&error);
                move || {
                    let mut catalog_guard = catalog.lock().unwrap();
                    let writer = std::io::BufWriter::with_capacity(
                        buffer_size,
                        crate::tools::StdChannelWriter::new(tx),
                    );

        let catalog = catalog.clone();
        let child = std::thread::Builder::new().name("PxarBackupStream".to_string()).spawn(move || {
            let mut guard = catalog.lock().unwrap();
            let mut writer = std::io::BufWriter::with_capacity(buffer_size, crate::tools::StdChannelWriter::new(tx));

            if let Err(err) = pxar::Encoder::encode(
                path,
                &mut dir,
                &mut writer,
                Some(&mut *guard),
                device_set,
                verbose,
                skip_lost_and_found,
                pxar::flags::DEFAULT,
                exclude_pattern,
                entries_max,
            ) {
                let mut error = error2.lock().unwrap();
                *error = Some(err.to_string());
            }
        })?;
                    let writer = pxar::encoder::sync::StandardWriter::new(writer);
                    if let Err(err) = crate::pxar::create_archive(
                        dir,
                        writer,
                        patterns,
                        crate::pxar::Flags::DEFAULT,
                        device_set,
                        skip_lost_and_found,
                        |_| Ok(()),
                        entries_max,
                        Some(&mut *catalog_guard),
                    ) {
                        let mut error = error.lock().unwrap();
                        *error = Some(err.to_string());
                    }
                }
            })?;

        Ok(Self {
            rx: Some(rx),
@@ -91,23 +93,31 @@ impl PxarBackupStream {
        verbose: bool,
        skip_lost_and_found: bool,
        catalog: Arc<Mutex<CatalogWriter<W>>>,
        exclude_pattern: Vec<pxar::MatchPattern>,
        patterns: Vec<MatchEntry>,
        entries_max: usize,
    ) -> Result<Self, Error> {

        let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
        let path = std::path::PathBuf::from(dirname);

        Self::new(dir, path, device_set, verbose, skip_lost_and_found, catalog, exclude_pattern, entries_max)
        Self::new(
            dir,
            path,
            device_set,
            verbose,
            skip_lost_and_found,
            catalog,
            patterns,
            entries_max,
        )
    }
}

impl Stream for PxarBackupStream {

    type Item = Result<Vec<u8>, Error>;

    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        { // limit lock scope
        {
            // limit lock scope
            let error = self.error.lock().unwrap();
            if let Some(ref msg) = *error {
                return Poll::Ready(Some(Err(format_err!("{}", msg))));
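In both the old and new variants above, the encoder runs on a named thread and streams its output through a bounded `sync_channel` wrapped in a `BufWriter`, while the receiving end is polled as a `Stream`. A generic, self-contained sketch of that producer side, with `ChannelWriter` standing in for the role of `crate::tools::StdChannelWriter` (the real adapter's internals are not shown in this diff):

```rust
use std::io::Write;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};

/// io::Write adapter that ships every buffer over a bounded channel,
/// applying backpressure to the encoder when the consumer falls behind.
struct ChannelWriter(SyncSender<Vec<u8>>);

impl Write for ChannelWriter {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.0
            .send(buf.to_vec())
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::BrokenPipe, e))?;
        Ok(buf.len())
    }
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

fn spawn_producer() -> Receiver<Vec<u8>> {
    let (tx, rx) = sync_channel(10); // same bound as the code above
    std::thread::Builder::new()
        .name("PxarBackupStream".to_string())
        .spawn(move || {
            let mut writer = std::io::BufWriter::with_capacity(256 * 1024, ChannelWriter(tx));
            // ... create_archive(dir, writer, ...) would encode here ...
            let _ = writer.write_all(b"archive bytes");
        })
        .expect("failed to spawn producer thread");
    rx
}
```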
@@ -1,70 +0,0 @@
use anyhow::{Error};

use std::thread;
use std::os::unix::io::FromRawFd;
use std::path::{Path, PathBuf};
use std::io::Write;

use crate::pxar;

/// Writer implementation to decode a .pxar archive (download).

pub struct PxarDecodeWriter {
    pipe: Option<std::fs::File>,
    child: Option<thread::JoinHandle<()>>,
}

impl Drop for PxarDecodeWriter {

    fn drop(&mut self) {
        drop(self.pipe.take());
        self.child.take().unwrap().join().unwrap();
    }
}

impl PxarDecodeWriter {

    pub fn new(base: &Path, verbose: bool) -> Result<Self, Error> {
        let (rx, tx) = nix::unistd::pipe()?;

        let base = PathBuf::from(base);

        let child = thread::spawn(move|| {
            let mut reader = unsafe { std::fs::File::from_raw_fd(rx) };
            let mut decoder = pxar::SequentialDecoder::new(&mut reader, pxar::flags::DEFAULT);
            decoder.set_callback(move |path| {
                if verbose {
                    println!("{:?}", path);
                }
                Ok(())
            });

            if let Err(err) = decoder.restore(&base, &Vec::new()) {
                eprintln!("pxar decode failed - {}", err);
            }
        });

        let pipe = unsafe { std::fs::File::from_raw_fd(tx) };

        Ok(Self { pipe: Some(pipe), child: Some(child) })
    }
}

impl Write for PxarDecodeWriter {

    fn write(&mut self, buffer: &[u8]) -> Result<usize, std::io::Error> {
        let pipe = match self.pipe {
            Some(ref mut pipe) => pipe,
            None => unreachable!(),
        };
        pipe.write(buffer)
    }

    fn flush(&mut self) -> Result<(), std::io::Error> {
        let pipe = match self.pipe {
            Some(ref mut pipe) => pipe,
            None => unreachable!(),
        };
        pipe.flush()
    }
}
@@ -1,10 +1,12 @@
use std::future::Future;
use std::collections::HashMap;
use std::sync::Arc;
use std::pin::Pin;
use std::sync::{Arc, Mutex};

use anyhow::{Error};
use anyhow::Error;

use super::BackupReader;
use crate::backup::{ReadChunk, DataBlob, CryptConfig};
use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
use crate::tools::runtime::block_on;

/// Read chunks from remote host using ``BackupReader``
@@ -12,11 +14,10 @@ pub struct RemoteChunkReader {
    client: Arc<BackupReader>,
    crypt_config: Option<Arc<CryptConfig>>,
    cache_hint: HashMap<[u8; 32], usize>,
    cache: HashMap<[u8; 32], Vec<u8>>,
    cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
}

impl RemoteChunkReader {

    /// Create a new instance.
    ///
    /// Chunks listed in ``cache_hint`` are cached and kept in RAM.
@@ -25,50 +26,82 @@ impl RemoteChunkReader {
        crypt_config: Option<Arc<CryptConfig>>,
        cache_hint: HashMap<[u8; 32], usize>,
    ) -> Self {

        Self { client, crypt_config, cache_hint, cache: HashMap::new() }
        Self {
            client,
            crypt_config,
            cache_hint,
            cache: Mutex::new(HashMap::new()),
        }
    }
}

impl ReadChunk for RemoteChunkReader {
    pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);

    fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {

        let mut chunk_data = Vec::with_capacity(4*1024*1024);

        //tokio::task::block_in_place(|| futures::executor::block_on(self.client.download_chunk(&digest, &mut chunk_data)))?;
        block_on(async {
            // download_chunk returns the writer back to us, but we need to return a 'static value
            self.client
                .download_chunk(&digest, &mut chunk_data)
                .await
                .map(drop)
        })?;
        self.client
            .download_chunk(&digest, &mut chunk_data)
            .await?;

        let chunk = DataBlob::from_raw(chunk_data)?;
        chunk.verify_crc()?;

        Ok(chunk)
    }
}

    fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
impl ReadChunk for RemoteChunkReader {
    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        block_on(Self::read_raw_chunk(self, digest))
    }

        if let Some(raw_data) = self.cache.get(digest) {
    fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
            return Ok(raw_data.to_vec());
        }

        let chunk = self.read_raw_chunk(digest)?;
        let chunk = ReadChunk::read_raw_chunk(self, digest)?;

        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

        // fixme: verify digest?

        let use_cache = self.cache_hint.contains_key(digest);
        if use_cache {
            self.cache.insert(*digest, raw_data.to_vec());
            (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
        }

        Ok(raw_data)
    }

}

impl AsyncReadChunk for RemoteChunkReader {
    fn read_raw_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
        Box::pin(Self::read_raw_chunk(self, digest))
    }

    fn read_chunk<'a>(
        &'a self,
        digest: &'a [u8; 32],
    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
        Box::pin(async move {
            if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
                return Ok(raw_data.to_vec());
            }

            let chunk = Self::read_raw_chunk(self, digest).await?;

            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;

            // fixme: verify digest?

            let use_cache = self.cache_hint.contains_key(digest);
            if use_cache {
                (*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
            }

            Ok(raw_data)
        })
    }
}
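The refactoring above keeps a single inherent `async fn read_raw_chunk` and implements the blocking `ReadChunk` trait by driving it to completion, while `AsyncReadChunk` returns the same future boxed. A stripped-down sketch of that sync/async bridging pattern (`Reader` and its trait are stand-ins; `futures::executor::block_on` substitutes here for `crate::tools::runtime::block_on`):

```rust
use anyhow::Error;
use futures::executor::block_on;

trait ReadChunk {
    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
}

struct Reader;

impl Reader {
    // The one real implementation: async, shared by both trait impls.
    async fn read_raw_chunk(&self, _digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        Ok(vec![0u8; 4]) // stand-in for the HTTP/2 chunk download
    }
}

impl ReadChunk for Reader {
    fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
        // Inherent method wins name resolution, so this is not recursive.
        block_on(Reader::read_raw_chunk(self, digest))
    }
}
```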
@@ -190,7 +190,7 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
        "system" => {
            if components_len == 1 { return Ok(()); }
            match components[1] {
                "log" | "status" | "tasks" | "time" => {
                "disks" | "log" | "status" | "tasks" | "time" => {
                    if components_len == 2 { return Ok(()); }
                }
                "services" => { // /system/services/{service}
@@ -149,7 +149,7 @@ impl Interface {
        Ok(())
    }

    /// Write attributes not dependening on address family
    /// Write attributes not depending on address family
    fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> {

        static EMPTY_LIST: Vec<String> = Vec::new();
@@ -187,7 +187,7 @@ impl Interface {
        Ok(())
    }

    /// Write attributes dependening on address family inet (IPv4)
    /// Write attributes depending on address family inet (IPv4)
    fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
        if method == NetworkConfigMethod::Static {
            if let Some(address) = &self.cidr {
@@ -211,7 +211,7 @@ impl Interface {
        Ok(())
    }

    /// Write attributes dependening on address family inet6 (IPv6)
    /// Write attributes depending on address family inet6 (IPv6)
    fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
        if method == NetworkConfigMethod::Static {
            if let Some(address) = &self.cidr6 {
@@ -141,7 +141,7 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {

pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error> {

    let output = Command::new("/usr/bin/diff")
    let output = Command::new("diff")
        .arg("-b")
        .arg("-u")
        .arg(filename)
@@ -165,10 +165,10 @@ pub fn assert_ifupdown2_installed() -> Result<(), Error> {

pub fn network_reload() -> Result<(), Error> {

    let output = Command::new("/sbin/ifreload")
    let output = Command::new("ifreload")
        .arg("-a")
        .output()
        .map_err(|err| format_err!("failed to execute '/sbin/ifreload' - {}", err))?;
        .map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;

    crate::tools::command_output(output, None)
        .map_err(|err| format_err!("ifreload failed: {}", err))?;
@@ -46,7 +46,7 @@ lazy_static! {
    },
    schedule: {
        optional: true,
        schema: GC_SCHEDULE_SCHEMA,
        schema: SYNC_SCHEDULE_SCHEMA,
    },
    }
)]
@@ -66,6 +66,79 @@ pub struct SyncJobConfig {
    pub schedule: Option<String>,
}

// FIXME: generate duplicate schemas/structs from one listing?
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[serde(rename_all="kebab-case")]
#[derive(Serialize,Deserialize)]
/// Status of Sync Job
pub struct SyncJobStatus {
    pub id: String,
    pub store: String,
    pub remote: String,
    pub remote_store: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub remove_vanished: Option<bool>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_run: Option<i64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_state: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_upid: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_endtime: Option<i64>,
}

fn init() -> SectionConfig {
    let obj_schema = match SyncJobConfig::API_SCHEMA {
        Schema::Object(ref obj_schema) => obj_schema,
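Illustrative only: given the `rename_all="kebab-case"` and `skip_serializing_if` attributes above, a `SyncJobStatus` serializes with kebab-case keys and omits fields that are `None` (all values below are made up):

```rust
fn example_status() -> serde_json::Value {
    serde_json::json!({
        "id": "pull-store2",
        "store": "store2",
        "remote": "pbs-remote",
        "remote-store": "store1",
        "schedule": "daily",
        "next-run": 1_590_000_000,
        "last-run-state": "OK",
        "last-run-endtime": 1_589_913_600
        // optional fields that are None (e.g. "comment") are omitted entirely
    })
}
```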
@@ -1,229 +0,0 @@
//! Helpers to generate a binary search tree stored in an array from a
//! sorted array.
//!
//! Specifically, for any given sorted array 'input' permute the
//! array so that the following rule holds:
//!
//! For each array item with index i, the item at 2i+1 is smaller and
//! the item 2i+2 is larger.
//!
//! This structure permits efficient (meaning: O(log(n))) binary
//! searches: start with item i=0 (i.e. the root of the BST), compare
//! the value with the searched item, if smaller proceed at item
//! 2i+1, if larger proceed at item 2i+2, and repeat, until either
//! the item is found, or the indexes grow beyond the array size,
//! which means the entry does not exist.
//!
//! Effectively this implements bisection, but instead of jumping
//! around wildly in the array during a single search we only search
//! with strictly monotonically increasing indexes.
//!
//! Algorithm is from casync (camakebst.c), simplified and optimized
//! for rust. Permutation function originally by L. Bressel, 2017. We
//! pass permutation info to user provided callback, which actually
//! implements the data copy.
//!
//! The Wikipedia article for [Binary
//! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
//! intro on how to store binary trees using an array.

use std::cmp::Ordering;

#[allow(clippy::many_single_char_names)]
fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
    copy_func: &mut F,
    // we work on input array input[o..o+n]
    n: usize,
    o: usize,
    e: usize,
    i: usize,
) {
    let p = 1 << e;

    let t = p + (p>>1) - 1;

    let m = if n > t {
        // |...........p.............t....n........(2p)|
        p - 1
    } else {
        // |...........p.....n.......t.............(2p)|
        p - 1 - (t-n)
    };

    (copy_func)(o+m, i);

    if m > 0 {
        copy_binary_search_tree_inner(copy_func, m, o, e-1, i*2+1);
    }

    if (m + 1) < n {
        copy_binary_search_tree_inner(copy_func, n-m-1, o+m+1, e-1, i*2+2);
    }
}

/// This function calls the provided `copy_func()` with the permutation
/// info.
///
/// ```
/// # use proxmox_backup::pxar::copy_binary_search_tree;
/// copy_binary_search_tree(5, |src, dest| {
///     println!("Copy {} to {}", src, dest);
/// });
/// ```
///
/// This will produce the following output:
///
/// ```no-compile
/// Copy 3 to 0
/// Copy 1 to 1
/// Copy 0 to 3
/// Copy 2 to 4
/// Copy 4 to 2
/// ```
///
/// So this generates the following permutation: `[3,1,4,0,2]`.

pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
    n: usize,
    mut copy_func: F,
) {
    if n == 0 { return };
    let e = (64 - n.leading_zeros() - 1) as usize; // fast log2(n)

    copy_binary_search_tree_inner(&mut copy_func, n, 0, e, 0);
}


/// This function searches for the index where the comparison by the provided
/// `compare()` function returns `Ordering::Equal`.
/// The order of the comparison matters (noncommutative) and should be search
/// value compared to value at given index as shown in the examples.
/// The parameter `skip_multiples` defines the number of matches to ignore while
/// searching before returning the index in order to lookup duplicate entries in
/// the tree.
///
/// ```
/// # use proxmox_backup::pxar::{copy_binary_search_tree, search_binary_tree_by};
/// let mut vals = vec![0,1,2,2,2,3,4,5,6,6,7,8,8,8];
///
/// let clone = vals.clone();
/// copy_binary_search_tree(vals.len(), |s, d| {
///     vals[d] = clone[s];
/// });
/// let should_be = vec![5,2,8,1,3,6,8,0,2,2,4,6,7,8];
/// assert_eq!(vals, should_be);
///
/// let find = 8;
/// let skip_multiples = 0;
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert_eq!(idx, Some(2));
///
/// let find = 8;
/// let skip_multiples = 1;
/// let idx = search_binary_tree_by(2, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert_eq!(idx, Some(6));
///
/// let find = 8;
/// let skip_multiples = 1;
/// let idx = search_binary_tree_by(6, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert_eq!(idx, Some(13));
///
/// let find = 5;
/// let skip_multiples = 1;
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert!(idx.is_none());
///
/// let find = 5;
/// let skip_multiples = 0;
/// // if start index is equal to the array length, `None` is returned.
/// let idx = search_binary_tree_by(vals.len(), vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert!(idx.is_none());
///
/// let find = 5;
/// let skip_multiples = 0;
/// // if start index is larger than length, `None` is returned.
/// let idx = search_binary_tree_by(vals.len() + 1, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
/// assert!(idx.is_none());
/// ```

pub fn search_binary_tree_by<F: Copy + Fn(usize) -> Ordering>(
    start: usize,
    size: usize,
    skip_multiples: usize,
    compare: F
) -> Option<usize> {
    if start >= size {
        return None;
    }

    let mut skip = skip_multiples;
    let cmp = compare(start);
    if cmp == Ordering::Equal {
        if skip == 0 {
            // Found matching hash and want this one
            return Some(start);
        }
        // Found matching hash, but we should skip the first `skip_multiple`,
        // so continue search with reduced skip count.
        skip -= 1;
    }

    if cmp == Ordering::Less || cmp == Ordering::Equal {
        let res = search_binary_tree_by(2 * start + 1, size, skip, compare);
        if res.is_some() {
            return res;
        }
    }

    if cmp == Ordering::Greater || cmp == Ordering::Equal {
        let res = search_binary_tree_by(2 * start + 2, size, skip, compare);
        if res.is_some() {
            return res;
        }
    }

    None
}

#[test]
fn test_binary_search_tree() {

    fn run_test(len: usize) -> Vec<usize> {

        const MARKER: usize = 0xfffffff;
        let mut output = vec![];
        for _i in 0..len { output.push(MARKER); }
        copy_binary_search_tree(len, |s, d| {
            assert!(output[d] == MARKER);
            output[d] = s;
        });
        if len < 32 { println!("GOT:{}:{:?}", len, output); }
        for i in 0..len {
            assert!(output[i] != MARKER);
        }
        output
    }

    assert!(run_test(0).len() == 0);
    assert!(run_test(1) == [0]);
    assert!(run_test(2) == [1,0]);
    assert!(run_test(3) == [1,0,2]);
    assert!(run_test(4) == [2,1,3,0]);
    assert!(run_test(5) == [3,1,4,0,2]);
    assert!(run_test(6) == [3,1,5,0,2,4]);
    assert!(run_test(7) == [3,1,5,0,2,4,6]);
    assert!(run_test(8) == [4,2,6,1,3,5,7,0]);
    assert!(run_test(9) == [5,3,7,1,4,6,8,0,2]);
    assert!(run_test(10) == [6,3,8,1,5,7,9,0,2,4]);
    assert!(run_test(11) == [7,3,9,1,5,8,10,0,2,4,6]);
    assert!(run_test(12) == [7,3,10,1,5,9,11,0,2,4,6,8]);
    assert!(run_test(13) == [7,3,11,1,5,9,12,0,2,4,6,8,10]);
    assert!(run_test(14) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12]);
    assert!(run_test(15) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12,14]);
    assert!(run_test(16) == [8,4,12,2,6,10,14,1,3,5,7,9,11,13,15,0]);
    assert!(run_test(17) == [9,5,13,3,7,11,15,1,4,6,8,10,12,14,16,0,2]);

    for len in 18..1000 {
        run_test(len);
    }
}
src/pxar/create.rs (new file, 1006 lines)
File diff suppressed because it is too large
@@ -1,365 +0,0 @@
//! *pxar* format decoder for seekable files
//!
//! This module contains the code to decode *pxar* archive files.

use std::convert::TryFrom;
use std::ffi::{OsString, OsStr};
use std::io::{Read, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use std::os::unix::ffi::OsStrExt;

use anyhow::{bail, format_err, Error};
use libc;

use super::binary_search_tree::search_binary_tree_by;
use super::format_definition::*;
use super::sequential_decoder::SequentialDecoder;
use super::match_pattern::MatchPattern;

use proxmox::tools::io::ReadExt;

pub struct DirectoryEntry {
    /// Points to the `PxarEntry` of the directory
    start: u64,
    /// Points past the goodbye table tail
    end: u64,
    /// Filename of entry
    pub filename: OsString,
    /// Entry (mode, permissions)
    pub entry: PxarEntry,
    /// Extended attributes
    pub xattr: PxarAttributes,
    /// Payload size
    pub size: u64,
    /// Target path for symbolic links
    pub target: Option<PathBuf>,
    /// Start offset of the payload if present.
    pub payload_offset: Option<u64>,
}

/// Trait to create ReadSeek Decoder trait objects.
trait ReadSeek: Read + Seek {}
impl <R: Read + Seek> ReadSeek for R {}

// This one needs Read+Seek
pub struct Decoder {
    inner: SequentialDecoder<Box<dyn ReadSeek + Send>>,
    root_start: u64,
    root_end: u64,
}

const HEADER_SIZE: u64 = std::mem::size_of::<PxarHeader>() as u64;
const GOODBYE_ITEM_SIZE: u64 = std::mem::size_of::<PxarGoodbyeItem>() as u64;

impl Decoder {
    pub fn new<R: Read + Seek + Send + 'static>(mut reader: R) -> Result<Self, Error> {
        let root_end = reader.seek(SeekFrom::End(0))?;
        let boxed_reader: Box<dyn ReadSeek + 'static + Send> = Box::new(reader);
        let inner = SequentialDecoder::new(boxed_reader, super::flags::DEFAULT);

        Ok(Self { inner, root_start: 0, root_end })
    }

    pub fn set_callback<F: Fn(&Path) -> Result<(), Error> + Send + 'static>(&mut self, callback: F ) {
        self.inner.set_callback(callback);
    }

    pub fn root(&mut self) -> Result<DirectoryEntry, Error> {
        self.seek(SeekFrom::Start(0))?;
        let header: PxarHeader = self.inner.read_item()?;
        check_ca_header::<PxarEntry>(&header, PXAR_ENTRY)?;
        let entry: PxarEntry = self.inner.read_item()?;
        let (header, xattr) = self.inner.read_attributes()?;
        let (size, payload_offset) = match header.htype {
            PXAR_PAYLOAD => (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?)),
            _ => (0, None),
        };

        Ok(DirectoryEntry {
            start: self.root_start,
            end: self.root_end,
            filename: OsString::new(), // Empty
            entry,
            xattr,
            size,
            target: None,
            payload_offset,
        })
    }

    fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
        let pos = self.inner.get_reader_mut().seek(pos)?;
        Ok(pos)
    }

    pub(crate) fn root_end_offset(&self) -> u64 {
        self.root_end
    }

    /// Restore the subarchive starting at `dir` to the provided target `path`.
    ///
    /// Only restore the content matched by the MatchPattern `pattern`.
    /// An empty Vec `pattern` means restore all.
    pub fn restore(&mut self, dir: &DirectoryEntry, path: &Path, pattern: &Vec<MatchPattern>) -> Result<(), Error> {
        let start = dir.start;
        self.seek(SeekFrom::Start(start))?;
        self.inner.restore(path, pattern)?;

        Ok(())
    }

    pub(crate) fn read_directory_entry(
        &mut self,
        start: u64,
        end: u64,
    ) -> Result<DirectoryEntry, Error> {
        self.seek(SeekFrom::Start(start))?;

        let head: PxarHeader = self.inner.read_item()?;

        if head.htype != PXAR_FILENAME {
            bail!("wrong filename header type for object [{}..{}]", start, end);
        }

        let entry_start = start + head.size;

        let filename = self.inner.read_filename(head.size)?;

        let head: PxarHeader = self.inner.read_item()?;
        if head.htype == PXAR_FORMAT_HARDLINK {
            let (_, offset) = self.inner.read_hardlink(head.size)?;
            // TODO: How to find the correct end offset for a hardlink target?
            // This is a bit tricky since we cannot find correct end in an efficient
            // way, on the other hand it doesn't really matter (for now) since target
            // is never a directory and end is not used in such cases.
            return self.read_directory_entry(start - offset, end);
        }
        check_ca_header::<PxarEntry>(&head, PXAR_ENTRY)?;
        let entry: PxarEntry = self.inner.read_item()?;
        let (header, xattr) = self.inner.read_attributes()?;
        let (size, payload_offset, target) = match header.htype {
            PXAR_PAYLOAD =>
                (header.size - HEADER_SIZE, Some(self.seek(SeekFrom::Current(0))?), None),
            PXAR_SYMLINK =>
                (header.size - HEADER_SIZE, None, Some(self.inner.read_link(header.size)?)),
            _ => (0, None, None),
        };

        Ok(DirectoryEntry {
            start: entry_start,
            end,
            filename,
            entry,
            xattr,
            size,
            target,
            payload_offset,
        })
    }

    /// Return the goodbye table based on the provided end offset.
    ///
    /// Get the goodbye table entries and the start and end offsets of the
    /// items they reference.
    /// If the start offset is provided, we use that to check the consistency of
    /// the data, else the start offset calculated based on the goodbye tail is
    /// used.
    pub(crate) fn goodbye_table(
        &mut self,
        start: Option<u64>,
        end: u64,
    ) -> Result<Vec<(PxarGoodbyeItem, u64, u64)>, Error> {
        self.seek(SeekFrom::Start(end - GOODBYE_ITEM_SIZE))?;

        let tail: PxarGoodbyeItem = self.inner.read_item()?;
        if tail.hash != PXAR_GOODBYE_TAIL_MARKER {
            bail!("missing goodbye tail marker for object at offset {}", end);
        }

        // If the start offset was provided, we use and check based on that.
        // If not, we rely on the offset calculated from the goodbye table entry.
        let start = start.unwrap_or(end - tail.offset - tail.size);
        let goodbye_table_size = tail.size;
        if goodbye_table_size < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
            bail!("short goodbye table size for object [{}..{}]", start, end);
        }

        let goodbye_inner_size = goodbye_table_size - HEADER_SIZE - GOODBYE_ITEM_SIZE;
        if (goodbye_inner_size % GOODBYE_ITEM_SIZE) != 0 {
            bail!(
                "wrong goodbye inner table size for entry [{}..{}]",
                start,
                end
            );
        }

        let goodbye_start = end - goodbye_table_size;
        if tail.offset != (goodbye_start - start) {
            bail!(
                "wrong offset in goodbye tail marker for entry [{}..{}]",
                start,
                end
            );
        }

        self.seek(SeekFrom::Start(goodbye_start))?;
        let head: PxarHeader = self.inner.read_item()?;
        if head.htype != PXAR_GOODBYE {
            bail!(
                "wrong goodbye table header type for entry [{}..{}]",
                start,
                end
            );
        }

        if head.size != goodbye_table_size {
            bail!("wrong goodbye table size for entry [{}..{}]", start, end);
        }

        let mut gb_entries = Vec::new();
        for i in 0..goodbye_inner_size / GOODBYE_ITEM_SIZE {
            let item: PxarGoodbyeItem = self.inner.read_item()?;
|
||||
if item.offset > (goodbye_start - start) {
|
||||
bail!(
|
||||
"goodbye entry {} offset out of range [{}..{}] {} {} {}",
|
||||
i,
|
||||
start,
|
||||
end,
|
||||
item.offset,
|
||||
goodbye_start,
|
||||
start
|
||||
);
|
||||
}
|
||||
let item_start = goodbye_start - item.offset;
|
||||
let item_end = item_start + item.size;
|
||||
if item_end > goodbye_start {
|
||||
bail!("goodbye entry {} end out of range [{}..{}]", i, start, end);
|
||||
}
|
||||
gb_entries.push((item, item_start, item_end));
|
||||
}
|
||||
|
||||
Ok(gb_entries)
|
||||
}
|
||||
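    // Added note (an informal layout sketch derived from the checks above,
    // not part of the original source): for a directory item spanning
    // [start..end), the goodbye table sits at the very end of that range:
    //
    //   start              goodbye_start                               end
    //     | FILENAME + ENTRY + content | GOODBYE header | items | tail |
    //
    // `tail.offset` equals `goodbye_start - start`, and `tail.size` is the
    // size of the whole GOODBYE object including its header and the tail
    // item, which is exactly what the consistency checks above verify.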

    pub fn list_dir(&mut self, dir: &DirectoryEntry) -> Result<Vec<DirectoryEntry>, Error> {
        let start = dir.start;
        let end = dir.end;

        //println!("list_dir1: {} {}", start, end);

        if (end - start) < (HEADER_SIZE + GOODBYE_ITEM_SIZE) {
            bail!("detected short object [{}..{}]", start, end);
        }

        let mut result = vec![];
        let goodbye_table = self.goodbye_table(Some(start), end)?;
        for (_, item_start, item_end) in goodbye_table {
            let entry = self.read_directory_entry(item_start, item_end)?;
            //println!("ENTRY: {} {} {:?}", item_start, item_end, entry.filename);
            result.push(entry);
        }

        Ok(result)
    }

    pub fn print_filenames<W: std::io::Write>(
        &mut self,
        output: &mut W,
        prefix: &mut PathBuf,
        dir: &DirectoryEntry,
    ) -> Result<(), Error> {
        let mut list = self.list_dir(dir)?;

        list.sort_unstable_by(|a, b| a.filename.cmp(&b.filename));

        for item in &list {
            prefix.push(item.filename.clone());

            let mode = item.entry.mode as u32;

            let ifmt = mode & libc::S_IFMT;

            writeln!(output, "{:?}", prefix)?;

            match ifmt {
                libc::S_IFDIR => self.print_filenames(output, prefix, item)?,
                libc::S_IFREG | libc::S_IFLNK | libc::S_IFBLK | libc::S_IFCHR => {}
                _ => bail!("unknown item mode/type for {:?}", prefix),
            }

            prefix.pop();
        }

        Ok(())
    }

    /// Look up the item identified by `filename` in the provided `DirectoryEntry`.
    ///
    /// Calculates the hash of the filename and searches for matching entries in
    /// the goodbye table of the provided `DirectoryEntry`.
    /// If an entry is found, the filename is compared as well, to guard against
    /// hash collisions.
    /// If the filename does not match, the search resumes with the next entry in
    /// the goodbye table.
    /// If there is no entry with matching `filename`, `Ok(None)` is returned.
    pub fn lookup(
        &mut self,
        dir: &DirectoryEntry,
        filename: &OsStr,
    ) -> Result<Option<DirectoryEntry>, Error> {
        let gbt = self.goodbye_table(Some(dir.start), dir.end)?;
        let hash = compute_goodbye_hash(filename.as_bytes());

        let mut start_idx = 0;
        let mut skip_multiple = 0;
        loop {
            // Search for the next goodbye entry with matching hash.
            let idx = search_binary_tree_by(
                start_idx,
                gbt.len(),
                skip_multiple,
                |idx| hash.cmp(&gbt[idx].0.hash),
            );
            let (_item, start, end) = match idx {
                Some(idx) => &gbt[idx],
                None => return Ok(None),
            };

            let entry = self.read_directory_entry(*start, *end)?;

            // Possible hash collision: check whether the found entry is indeed
            // the filename we are looking up.
            if entry.filename == filename {
                return Ok(Some(entry));
            }
            // Hash collision: check the next entry in the goodbye table by
            // restarting the search from the given index, but skipping one more
            // match (the hash at the index itself).
            start_idx = idx.unwrap();
            skip_multiple = 1;
        }
    }

    /// Read the payload of the file given by `entry`.
    ///
    /// This reads a file's payload as raw bytes, starting `offset` bytes after
    /// the payload marker and reading up to `size` bytes.
    /// If the payload from `offset` to EOF is smaller than `size` bytes, a
    /// buffer with reduced size is returned.
    /// If `offset` is larger than the payload size of the `DirectoryEntry`, an
    /// empty buffer is returned.
    pub fn read(&mut self, entry: &DirectoryEntry, size: usize, offset: u64) -> Result<Vec<u8>, Error> {
        let start_offset = entry.payload_offset
            .ok_or_else(|| format_err!("entry has no payload offset"))?;
        if offset >= entry.size {
            return Ok(Vec::new());
        }
        let len = if u64::try_from(size)? > entry.size {
            usize::try_from(entry.size)?
        } else {
            size
        };
        self.seek(SeekFrom::Start(start_offset + offset))?;
        let data = self.inner.get_reader_mut().read_exact_allocated(len)?;

        Ok(data)
    }
}
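// Usage sketch (added illustration; "backup.pxar" and "some.file" are
// hypothetical names, and the `Decoder` path may differ depending on how this
// module is re-exported):
//
//     use std::ffi::OsStr;
//     use std::fs::File;
//
//     fn dump_archive() -> Result<(), anyhow::Error> {
//         let file = File::open("backup.pxar")?;
//         let mut decoder = Decoder::new(file)?;
//         let root = decoder.root()?;
//
//         // Recursively print all filenames.
//         let mut prefix = std::path::PathBuf::new();
//         decoder.print_filenames(&mut std::io::stdout(), &mut prefix, &root)?;
//
//         // Look up a single entry by name and read the first 4 KiB of its payload.
//         if let Some(entry) = decoder.lookup(&root, OsStr::new("some.file"))? {
//             let data = decoder.read(&entry, 4096, 0)?;
//             println!("read {} bytes", data.len());
//         }
//         Ok(())
//     }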
@@ -2,117 +2,149 @@ use std::ffi::{OsStr, OsString};
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::PathBuf;

use anyhow::{format_err, Error};
use nix::errno::Errno;
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use nix::NixPath;
use nix::sys::stat::{mkdirat, Mode};

use super::format_definition::{PxarAttributes, PxarEntry};
use proxmox::sys::error::SysError;
use pxar::Metadata;

use crate::pxar::tools::{assert_single_path_component, perms_from_metadata};

pub struct PxarDir {
    pub filename: OsString,
    pub entry: PxarEntry,
    pub attr: PxarAttributes,
    pub dir: Option<nix::dir::Dir>,
}

pub struct PxarDirStack {
    root: RawFd,
    data: Vec<PxarDir>,
    file_name: OsString,
    metadata: Metadata,
    dir: Option<Dir>,
}

impl PxarDir {
    pub fn new(filename: &OsStr, entry: PxarEntry, attr: PxarAttributes) -> Self {
    pub fn new(file_name: OsString, metadata: Metadata) -> Self {
        Self {
            filename: filename.to_os_string(),
            entry,
            attr,
            file_name,
            metadata,
            dir: None,
        }
    }

    fn create_dir(&self, parent: RawFd, create_new: bool) -> Result<nix::dir::Dir, nix::Error> {
        let res = self
            .filename
            .with_nix_path(|cstr| unsafe { libc::mkdirat(parent, cstr.as_ptr(), libc::S_IRWXU) })?;
    pub fn with_dir(dir: Dir, metadata: Metadata) -> Self {
        Self {
            file_name: OsString::from("."),
            metadata,
            dir: Some(dir),
        }
    }

        match Errno::result(res) {
            Ok(_) => {}
    fn create_dir(&mut self, parent: RawFd, allow_existing_dirs: bool) -> Result<RawFd, Error> {
        match mkdirat(
            parent,
            self.file_name.as_os_str(),
            perms_from_metadata(&self.metadata)?,
        ) {
            Ok(()) => (),
            Err(err) => {
                if err == nix::Error::Sys(nix::errno::Errno::EEXIST) {
                    if create_new {
                        return Err(err);
                    }
                } else {
                    return Err(err);
                if !(allow_existing_dirs && err.already_exists()) {
                    return Err(err.into());
                }
            }
        }

        let dir = nix::dir::Dir::openat(
        self.open_dir(parent)
    }

    fn open_dir(&mut self, parent: RawFd) -> Result<RawFd, Error> {
        let dir = Dir::openat(
            parent,
            self.filename.as_os_str(),
            self.file_name.as_os_str(),
            OFlag::O_DIRECTORY,
            Mode::empty(),
        )?;

        Ok(dir)
        let fd = dir.as_raw_fd();
        self.dir = Some(dir);

        Ok(fd)
    }

    pub fn try_as_raw_fd(&self) -> Option<RawFd> {
        self.dir.as_ref().map(AsRawFd::as_raw_fd)
    }

    pub fn metadata(&self) -> &Metadata {
        &self.metadata
    }

    pub fn file_name(&self) -> &OsStr {
        &self.file_name
    }
}

pub struct PxarDirStack {
    dirs: Vec<PxarDir>,
    path: PathBuf,
    created: usize,
}

impl PxarDirStack {
    pub fn new(parent: RawFd) -> Self {
    pub fn new(root: Dir, metadata: Metadata) -> Self {
        Self {
            root: parent,
            data: Vec::new(),
            dirs: vec![PxarDir::with_dir(root, metadata)],
            path: PathBuf::from("/"),
            created: 1, // the root directory exists
        }
    }

    pub fn push(&mut self, dir: PxarDir) {
        self.data.push(dir);
    pub fn is_empty(&self) -> bool {
        self.dirs.is_empty()
    }

    pub fn pop(&mut self) -> Option<PxarDir> {
        self.data.pop()
    pub fn push(&mut self, file_name: OsString, metadata: Metadata) -> Result<(), Error> {
        assert_single_path_component(&file_name)?;
        self.path.push(&file_name);
        self.dirs.push(PxarDir::new(file_name, metadata));
        Ok(())
    }

    pub fn as_path_buf(&self) -> PathBuf {
        let path: PathBuf = self.data.iter().map(|d| d.filename.clone()).collect();
        path
    }

    pub fn last(&self) -> Option<&PxarDir> {
        self.data.last()
    }

    pub fn last_mut(&mut self) -> Option<&mut PxarDir> {
        self.data.last_mut()
    }

    pub fn last_dir_fd(&self) -> Option<RawFd> {
        let last_dir = self.data.last()?;
        match &last_dir.dir {
            Some(d) => Some(d.as_raw_fd()),
            None => None,
        }
    }

    pub fn create_all_dirs(&mut self, create_new: bool) -> Result<RawFd, Error> {
        let mut current_fd = self.root;
        for d in &mut self.data {
            match &d.dir {
                Some(dir) => current_fd = dir.as_raw_fd(),
                None => {
                    let dir = d
                        .create_dir(current_fd, create_new)
                        .map_err(|err| format_err!("create dir failed - {}", err))?;
                    current_fd = dir.as_raw_fd();
                    d.dir = Some(dir);
                }
    pub fn pop(&mut self) -> Result<Option<PxarDir>, Error> {
        let out = self.dirs.pop();
        if !self.path.pop() {
            if self.path.as_os_str() == "/" {
                // we just finished the root directory, make sure this can only happen once:
                self.path = PathBuf::new();
            } else {
                bail!("lost track of path");
            }
        }
        self.created = self.created.min(self.dirs.len());
        Ok(out)
    }

        Ok(current_fd)
    pub fn last_dir_fd(&mut self, allow_existing_dirs: bool) -> Result<RawFd, Error> {
        // should not be possible given the way we use it:
        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");

        let mut fd = self.dirs[self.created - 1]
            .try_as_raw_fd()
            .ok_or_else(|| format_err!("lost track of directory file descriptors"))?;
        while self.created < self.dirs.len() {
            fd = self.dirs[self.created].create_dir(fd, allow_existing_dirs)?;
            self.created += 1;
        }

        Ok(fd)
    }

    pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
        let _: RawFd = self.last_dir_fd(allow_existing_dirs)?;
        Ok(())
    }

    pub fn root_dir_fd(&self) -> Result<RawFd, Error> {
        // should not be possible given the way we use it:
        assert!(!self.dirs.is_empty(), "PxarDirStack underrun");

        self.dirs[0]
            .try_as_raw_fd()
            .ok_or_else(|| format_err!("lost track of directory file descriptors"))
    }
}
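// Added note on the rewritten stack (a sketch of the invariant, not new
// behavior): `created` counts how many of the directories on `dirs` currently
// exist on disk, starting with the root at index 0. `last_dir_fd` starts from
// `dirs[created - 1]`, the deepest directory that already exists, and creates
// the missing levels one at a time. This is what lets the extractor delay the
// mkdirat() for excluded directories until they turn out to contain matched
// content.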
src/pxar/encoder.rs: 1332 lines changed (file diff suppressed because it is too large)
src/pxar/extract.rs: 358 lines added (new file)
@@ -0,0 +1,358 @@
//! Code for extraction of pxar contents onto the file system.

use std::convert::TryFrom;
use std::ffi::{CStr, CString, OsStr, OsString};
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;

use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use pathpatterns::{MatchEntry, MatchList, MatchType};
use pxar::format::Device;
use pxar::Metadata;

use proxmox::c_result;
use proxmox::tools::fs::{create_path, CreateOptions};

use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::Flags;
use crate::pxar::metadata;

pub fn extract_archive<T, F>(
    mut decoder: pxar::decoder::Decoder<T>,
    destination: &Path,
    match_list: &[MatchEntry],
    feature_flags: Flags,
    allow_existing_dirs: bool,
    mut callback: F,
) -> Result<(), Error>
where
    T: pxar::decoder::SeqRead,
    F: FnMut(&Path),
{
    // we use this to keep track of our directory-traversal
    decoder.enable_goodbye_entries(true);

    let root = decoder
        .next()
        .ok_or_else(|| format_err!("found empty pxar archive"))?
        .map_err(|err| format_err!("error reading pxar archive: {}", err))?;

    if !root.is_dir() {
        bail!("pxar archive does not start with a directory entry!");
    }

    create_path(
        &destination,
        None,
        Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
    )
    .map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;

    let dir = Dir::open(
        destination,
        OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
        Mode::empty(),
    )
    .map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err))?;

    let mut extractor = Extractor::new(
        dir,
        root.metadata().clone(),
        allow_existing_dirs,
        feature_flags,
    );

    let mut match_stack = Vec::new();
    let mut current_match = true;
    while let Some(entry) = decoder.next() {
        use pxar::EntryKind;

        let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;

        let file_name_os = entry.file_name();

        // safety check: a file entry in an archive must never contain slashes:
        if file_name_os.as_bytes().contains(&b'/') {
            bail!("archive file entry contains slashes, which is invalid and a security concern");
        }

        let file_name = CString::new(file_name_os.as_bytes())
            .map_err(|_| format_err!("encountered file name with null-bytes"))?;

        let metadata = entry.metadata();

        let match_result = match_list.matches(
            entry.path().as_os_str().as_bytes(),
            Some(metadata.file_type() as u32),
        );

        let did_match = match match_result {
            Some(MatchType::Include) => true,
            Some(MatchType::Exclude) => false,
            None => current_match,
        };
        match (did_match, entry.kind()) {
            (_, EntryKind::Directory) => {
                callback(entry.path());

                let create = current_match && match_result != Some(MatchType::Exclude);
                extractor.enter_directory(file_name_os.to_owned(), metadata.clone(), create)?;

                // We're starting a new directory, push our old matching state and replace it with
                // our new one:
                match_stack.push(current_match);
                current_match = did_match;

                Ok(())
            }
            (_, EntryKind::GoodbyeTable) => {
                // go up a directory
                extractor
                    .leave_directory()
                    .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;

                // We left a directory, also get back our previous matching state. This is in sync
                // with `dir_stack` so this should never be empty except for the final goodbye
                // table, in which case we get back to the default of `true`.
                current_match = match_stack.pop().unwrap_or(true);

                Ok(())
            }
            (true, EntryKind::Symlink(link)) => {
                callback(entry.path());
                extractor.extract_symlink(&file_name, metadata, link.as_ref())
            }
            (true, EntryKind::Hardlink(link)) => {
                callback(entry.path());
                extractor.extract_hardlink(&file_name, link.as_os_str())
            }
            (true, EntryKind::Device(dev)) => {
                if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
                    callback(entry.path());
                    extractor.extract_device(&file_name, metadata, dev)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::Fifo) => {
                if extractor.contains_flags(Flags::WITH_FIFOS) {
                    callback(entry.path());
                    extractor.extract_special(&file_name, metadata, 0)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::Socket) => {
                if extractor.contains_flags(Flags::WITH_SOCKETS) {
                    callback(entry.path());
                    extractor.extract_special(&file_name, metadata, 0)
                } else {
                    Ok(())
                }
            }
            (true, EntryKind::File { size, .. }) => extractor.extract_file(
                &file_name,
                metadata,
                *size,
                &mut decoder.contents().ok_or_else(|| {
                    format_err!("found regular file entry without contents in archive")
                })?,
            ),
            (false, _) => Ok(()), // skip this
        }
        .map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
    }

    if !extractor.dir_stack.is_empty() {
        bail!("unexpected eof while decoding pxar archive");
    }

    Ok(())
}
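// Sketch of the include/exclude bookkeeping used above (added illustration,
// all names local to this snippet): a directory inherits its parent's match
// state unless a pattern overrides it, so a plain Vec<bool> is enough to
// restore the state when the decoder emits the directory's goodbye table.
//
//     let mut match_stack: Vec<bool> = Vec::new();
//     let mut current_match = true;
//
//     // on EntryKind::Directory:
//     let did_match = true; // result of matching this directory's path
//     match_stack.push(current_match);
//     current_match = did_match;
//
//     // on EntryKind::GoodbyeTable (leaving the directory):
//     current_match = match_stack.pop().unwrap_or(true);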

/// Common state for file extraction.
pub(crate) struct Extractor {
    feature_flags: Flags,
    allow_existing_dirs: bool,
    dir_stack: PxarDirStack,
}

impl Extractor {
    /// Create a new extractor state for a target directory.
    pub fn new(
        root_dir: Dir,
        metadata: Metadata,
        allow_existing_dirs: bool,
        feature_flags: Flags,
    ) -> Self {
        Self {
            dir_stack: PxarDirStack::new(root_dir, metadata),
            allow_existing_dirs,
            feature_flags,
        }
    }

    /// When encountering a directory during extraction, this is used to keep track of it. If
    /// `create` is true it is immediately created and its metadata will be updated once we leave
    /// it. If `create` is false it will only be created if it is going to have any actual content.
    pub fn enter_directory(
        &mut self,
        file_name: OsString,
        metadata: Metadata,
        create: bool,
    ) -> Result<(), Error> {
        self.dir_stack.push(file_name, metadata)?;

        if create {
            self.dir_stack.create_last_dir(self.allow_existing_dirs)?;
        }

        Ok(())
    }

    /// When done with a directory we need to make sure its stored metadata gets applied.
    pub fn leave_directory(&mut self) -> Result<(), Error> {
        let dir = self
            .dir_stack
            .pop()
            .map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
            .ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;

        if let Some(fd) = dir.try_as_raw_fd() {
            metadata::apply(
                self.feature_flags,
                dir.metadata(),
                fd,
                &CString::new(dir.file_name().as_bytes())?,
            )?;
        }

        Ok(())
    }

    fn contains_flags(&self, flag: Flags) -> bool {
        self.feature_flags.contains(flag)
    }

    fn parent_fd(&mut self) -> Result<RawFd, Error> {
        self.dir_stack.last_dir_fd(self.allow_existing_dirs)
    }

    pub fn extract_symlink(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        link: &OsStr,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        nix::unistd::symlinkat(link, Some(parent), file_name)?;
        metadata::apply_at(self.feature_flags, metadata, parent, file_name)
    }

    pub fn extract_hardlink(
        &mut self,
        file_name: &CStr,
        link: &OsStr,
    ) -> Result<(), Error> {
        crate::pxar::tools::assert_relative_path(link)?;

        let parent = self.parent_fd()?;
        let root = self.dir_stack.root_dir_fd()?;
        let target = CString::new(link.as_bytes())?;
        nix::unistd::linkat(
            Some(root),
            target.as_c_str(),
            Some(parent),
            file_name,
            nix::unistd::LinkatFlags::NoSymlinkFollow,
        )?;

        Ok(())
    }

    pub fn extract_device(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        device: &Device,
    ) -> Result<(), Error> {
        self.extract_special(file_name, metadata, device.to_dev_t())
    }

    pub fn extract_special(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        device: libc::dev_t,
    ) -> Result<(), Error> {
        let mode = metadata.stat.mode;
        let mode = u32::try_from(mode).map_err(|_| {
            format_err!(
                "device node's mode contains illegal bits: 0x{:x} (0o{:o})",
                mode,
                mode,
            )
        })?;
        let parent = self.parent_fd()?;
        unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
            .map_err(|err| format_err!("failed to create device node: {}", err))?;

        metadata::apply_at(self.feature_flags, metadata, parent, file_name)
    }

    pub fn extract_file(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        size: u64,
        contents: &mut dyn io::Read,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        let mut file = unsafe {
            std::fs::File::from_raw_fd(nix::fcntl::openat(
                parent,
                file_name,
                OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                Mode::from_bits(0o600).unwrap(),
            )?)
        };

        let extracted = io::copy(&mut *contents, &mut file)?;
        if size != extracted {
            bail!("extracted {} bytes of a file of {} bytes", extracted, size);
        }

        metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
    }

    pub async fn async_extract_file<T: tokio::io::AsyncRead + Unpin>(
        &mut self,
        file_name: &CStr,
        metadata: &Metadata,
        size: u64,
        contents: &mut T,
    ) -> Result<(), Error> {
        let parent = self.parent_fd()?;
        let mut file = tokio::fs::File::from_std(unsafe {
            std::fs::File::from_raw_fd(nix::fcntl::openat(
                parent,
                file_name,
                OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_WRONLY | OFlag::O_CLOEXEC,
                Mode::from_bits(0o600).unwrap(),
            )?)
        });

        let extracted = tokio::io::copy(&mut *contents, &mut file).await?;
        if size != extracted {
            bail!("extracted {} bytes of a file of {} bytes", extracted, size);
        }

        metadata::apply(self.feature_flags, metadata, file.as_raw_fd(), file_name)
    }
}
@@ -3,315 +3,332 @@
//! Flags for known supported features for a given filesystem can be derived
//! from the superblocks magic number.

/// FAT-style 2s time granularity
pub const WITH_2SEC_TIME: u64 = 0x40;
/// Preserve read only flag of files
pub const WITH_READ_ONLY: u64 = 0x80;
/// Preserve unix permissions
pub const WITH_PERMISSIONS: u64 = 0x100;
/// Include symbolic links
pub const WITH_SYMLINKS: u64 = 0x200;
/// Include device nodes
pub const WITH_DEVICE_NODES: u64 = 0x400;
/// Include FIFOs
pub const WITH_FIFOS: u64 = 0x800;
/// Include Sockets
pub const WITH_SOCKETS: u64 = 0x1000;
use bitflags::bitflags;

/// Preserve DOS file flag `HIDDEN`
pub const WITH_FLAG_HIDDEN: u64 = 0x2000;
/// Preserve DOS file flag `SYSTEM`
pub const WITH_FLAG_SYSTEM: u64 = 0x4000;
/// Preserve DOS file flag `ARCHIVE`
pub const WITH_FLAG_ARCHIVE: u64 = 0x8000;
bitflags! {
    pub struct Flags: u64 {
        /// FAT-style 2s time granularity
        const WITH_2SEC_TIME = 0x40;
        /// Preserve read only flag of files
        const WITH_READ_ONLY = 0x80;
        /// Preserve unix permissions
        const WITH_PERMISSIONS = 0x100;
        /// Include symbolic links
        const WITH_SYMLINKS = 0x200;
        /// Include device nodes
        const WITH_DEVICE_NODES = 0x400;
        /// Include FIFOs
        const WITH_FIFOS = 0x800;
        /// Include Sockets
        const WITH_SOCKETS = 0x1000;

// chattr() flags
/// Linux file attribute `APPEND`
pub const WITH_FLAG_APPEND: u64 = 0x10000;
/// Linux file attribute `NOATIME`
pub const WITH_FLAG_NOATIME: u64 = 0x20000;
/// Linux file attribute `COMPR`
pub const WITH_FLAG_COMPR: u64 = 0x40000;
/// Linux file attribute `NOCOW`
pub const WITH_FLAG_NOCOW: u64 = 0x80000;
/// Linux file attribute `NODUMP`
pub const WITH_FLAG_NODUMP: u64 = 0x0010_0000;
/// Linux file attribute `DIRSYNC`
pub const WITH_FLAG_DIRSYNC: u64 = 0x0020_0000;
/// Linux file attribute `IMMUTABLE`
pub const WITH_FLAG_IMMUTABLE: u64 = 0x0040_0000;
/// Linux file attribute `SYNC`
pub const WITH_FLAG_SYNC: u64 = 0x0080_0000;
/// Linux file attribute `NOCOMP`
pub const WITH_FLAG_NOCOMP: u64 = 0x0100_0000;
/// Linux file attribute `PROJINHERIT`
pub const WITH_FLAG_PROJINHERIT: u64 = 0x0200_0000;
        /// Preserve DOS file flag `HIDDEN`
        const WITH_FLAG_HIDDEN = 0x2000;
        /// Preserve DOS file flag `SYSTEM`
        const WITH_FLAG_SYSTEM = 0x4000;
        /// Preserve DOS file flag `ARCHIVE`
        const WITH_FLAG_ARCHIVE = 0x8000;

        // chattr() flags
        /// Linux file attribute `APPEND`
        const WITH_FLAG_APPEND = 0x10000;
        /// Linux file attribute `NOATIME`
        const WITH_FLAG_NOATIME = 0x20000;
        /// Linux file attribute `COMPR`
        const WITH_FLAG_COMPR = 0x40000;
        /// Linux file attribute `NOCOW`
        const WITH_FLAG_NOCOW = 0x80000;
        /// Linux file attribute `NODUMP`
        const WITH_FLAG_NODUMP = 0x0010_0000;
        /// Linux file attribute `DIRSYNC`
        const WITH_FLAG_DIRSYNC = 0x0020_0000;
        /// Linux file attribute `IMMUTABLE`
        const WITH_FLAG_IMMUTABLE = 0x0040_0000;
        /// Linux file attribute `SYNC`
        const WITH_FLAG_SYNC = 0x0080_0000;
        /// Linux file attribute `NOCOMP`
        const WITH_FLAG_NOCOMP = 0x0100_0000;
        /// Linux file attribute `PROJINHERIT`
        const WITH_FLAG_PROJINHERIT = 0x0200_0000;


/// Preserve BTRFS subvolume flag
pub const WITH_SUBVOLUME: u64 = 0x0400_0000;
/// Preserve BTRFS read-only subvolume flag
pub const WITH_SUBVOLUME_RO: u64 = 0x0800_0000;
        /// Preserve BTRFS subvolume flag
        const WITH_SUBVOLUME = 0x0400_0000;
        /// Preserve BTRFS read-only subvolume flag
        const WITH_SUBVOLUME_RO = 0x0800_0000;

/// Preserve Extended Attribute metadata
pub const WITH_XATTRS: u64 = 0x1000_0000;
/// Preserve Access Control List metadata
pub const WITH_ACL: u64 = 0x2000_0000;
/// Preserve SELinux security context
pub const WITH_SELINUX: u64 = 0x4000_0000;
/// Preserve "security.capability" xattr
pub const WITH_FCAPS: u64 = 0x8000_0000;
        /// Preserve Extended Attribute metadata
        const WITH_XATTRS = 0x1000_0000;
        /// Preserve Access Control List metadata
        const WITH_ACL = 0x2000_0000;
        /// Preserve SELinux security context
        const WITH_SELINUX = 0x4000_0000;
        /// Preserve "security.capability" xattr
        const WITH_FCAPS = 0x8000_0000;

/// Preserve XFS/ext4/ZFS project quota ID
pub const WITH_QUOTA_PROJID: u64 = 0x0001_0000_0000;
        /// Preserve XFS/ext4/ZFS project quota ID
        const WITH_QUOTA_PROJID = 0x0001_0000_0000;

/// Support ".pxarexclude" files
pub const EXCLUDE_FILE: u64 = 0x1000_0000_0000_0000;
/// Exclude submounts
pub const EXCLUDE_SUBMOUNTS: u64 = 0x4000_0000_0000_0000;
/// Exclude entries with chattr flag NODUMP
pub const EXCLUDE_NODUMP: u64 = 0x8000_0000_0000_0000;
        /// Support ".pxarexclude" files
        const EXCLUDE_FILE = 0x1000_0000_0000_0000;
        /// Exclude submounts
        const EXCLUDE_SUBMOUNTS = 0x4000_0000_0000_0000;
        /// Exclude entries with chattr flag NODUMP
        const EXCLUDE_NODUMP = 0x8000_0000_0000_0000;

/// Definitions of typical feature flags for the *pxar* encoder/decoder.
/// This avoids expensive syscalls for features the filesystem does not support.
        // Definitions of typical feature flags for the *pxar* encoder/decoder.
        // This avoids expensive syscalls for features the filesystem does not support.

/// All chattr file attributes
pub const WITH_CHATTR: u64 =
    WITH_FLAG_APPEND|
    WITH_FLAG_NOATIME|
    WITH_FLAG_COMPR|
    WITH_FLAG_NOCOW|
    WITH_FLAG_NODUMP|
    WITH_FLAG_DIRSYNC|
    WITH_FLAG_IMMUTABLE|
    WITH_FLAG_SYNC|
    WITH_FLAG_NOCOMP|
    WITH_FLAG_PROJINHERIT;
        /// All chattr file attributes
        const WITH_CHATTR =
            Flags::WITH_FLAG_APPEND.bits() |
            Flags::WITH_FLAG_NOATIME.bits() |
            Flags::WITH_FLAG_COMPR.bits() |
            Flags::WITH_FLAG_NOCOW.bits() |
            Flags::WITH_FLAG_NODUMP.bits() |
            Flags::WITH_FLAG_DIRSYNC.bits() |
            Flags::WITH_FLAG_IMMUTABLE.bits() |
            Flags::WITH_FLAG_SYNC.bits() |
            Flags::WITH_FLAG_NOCOMP.bits() |
            Flags::WITH_FLAG_PROJINHERIT.bits();

/// All FAT file attributes
pub const WITH_FAT_ATTRS: u64 =
    WITH_FLAG_HIDDEN|
    WITH_FLAG_SYSTEM|
    WITH_FLAG_ARCHIVE;
        /// All FAT file attributes
        const WITH_FAT_ATTRS =
            Flags::WITH_FLAG_HIDDEN.bits() |
            Flags::WITH_FLAG_SYSTEM.bits() |
            Flags::WITH_FLAG_ARCHIVE.bits();

/// All bits that may also be exposed via fuse
pub const WITH_FUSE: u64 =
    WITH_2SEC_TIME|
    WITH_READ_ONLY|
    WITH_PERMISSIONS|
    WITH_SYMLINKS|
    WITH_DEVICE_NODES|
    WITH_FIFOS|
    WITH_SOCKETS|
    WITH_FAT_ATTRS|
    WITH_CHATTR|
    WITH_XATTRS;
        /// All bits that may also be exposed via fuse
        const WITH_FUSE =
            Flags::WITH_2SEC_TIME.bits() |
            Flags::WITH_READ_ONLY.bits() |
            Flags::WITH_PERMISSIONS.bits() |
            Flags::WITH_SYMLINKS.bits() |
            Flags::WITH_DEVICE_NODES.bits() |
            Flags::WITH_FIFOS.bits() |
            Flags::WITH_SOCKETS.bits() |
            Flags::WITH_FAT_ATTRS.bits() |
            Flags::WITH_CHATTR.bits() |
            Flags::WITH_XATTRS.bits();


/// Default feature flags for encoder/decoder
pub const DEFAULT: u64 =
    WITH_SYMLINKS|
    WITH_DEVICE_NODES|
    WITH_FIFOS|
    WITH_SOCKETS|
    WITH_FLAG_HIDDEN|
    WITH_FLAG_SYSTEM|
    WITH_FLAG_ARCHIVE|
    WITH_FLAG_APPEND|
    WITH_FLAG_NOATIME|
    WITH_FLAG_COMPR|
    WITH_FLAG_NOCOW|
    //WITH_FLAG_NODUMP|
    WITH_FLAG_DIRSYNC|
    WITH_FLAG_IMMUTABLE|
    WITH_FLAG_SYNC|
    WITH_FLAG_NOCOMP|
    WITH_FLAG_PROJINHERIT|
    WITH_SUBVOLUME|
    WITH_SUBVOLUME_RO|
    WITH_XATTRS|
    WITH_ACL|
    WITH_SELINUX|
    WITH_FCAPS|
    WITH_QUOTA_PROJID|
    EXCLUDE_NODUMP|
    EXCLUDE_FILE;

// from /usr/include/linux/fs.h
const FS_APPEND_FL: u32 = 0x0000_0020;
const FS_NOATIME_FL: u32 = 0x0000_0080;
const FS_COMPR_FL: u32 = 0x0000_0004;
const FS_NOCOW_FL: u32 = 0x0080_0000;
const FS_NODUMP_FL: u32 = 0x0000_0040;
const FS_DIRSYNC_FL: u32 = 0x0001_0000;
const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
const FS_SYNC_FL: u32 = 0x0000_0008;
const FS_NOCOMP_FL: u32 = 0x0000_0400;
const FS_PROJINHERIT_FL: u32 = 0x2000_0000;

static CHATTR_MAP: [(u64, u32); 10] = [
    ( WITH_FLAG_APPEND, FS_APPEND_FL ),
    ( WITH_FLAG_NOATIME, FS_NOATIME_FL ),
    ( WITH_FLAG_COMPR, FS_COMPR_FL ),
    ( WITH_FLAG_NOCOW, FS_NOCOW_FL ),
    ( WITH_FLAG_NODUMP, FS_NODUMP_FL ),
    ( WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
    ( WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
    ( WITH_FLAG_SYNC, FS_SYNC_FL ),
    ( WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
    ( WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
];

pub fn feature_flags_from_chattr(attr: u32) -> u64 {

    let mut flags = 0u64;

    for (fe_flag, fs_flag) in &CHATTR_MAP {
        if (attr & fs_flag) != 0 { flags |= fe_flag; }
    }

    flags
}

// from /usr/include/linux/msdos_fs.h
const ATTR_HIDDEN: u32 = 2;
const ATTR_SYS: u32 = 4;
const ATTR_ARCH: u32 = 32;

static FAT_ATTR_MAP: [(u64, u32); 3] = [
    ( WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
    ( WITH_FLAG_SYSTEM, ATTR_SYS ),
    ( WITH_FLAG_ARCHIVE, ATTR_ARCH ),
];

pub fn feature_flags_from_fat_attr(attr: u32) -> u64 {

    let mut flags = 0u64;

    for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
        if (attr & fs_flag) != 0 { flags |= fe_flag; }
    }

    flags
}


/// Return the supported *pxar* feature flags based on the magic number of the filesystem.
pub fn feature_flags_from_magic(magic: i64) -> u64 {
    use proxmox::sys::linux::magic::*;
    match magic {
        MSDOS_SUPER_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_FAT_ATTRS
        },
        EXT4_SUPER_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS|
            WITH_FLAG_APPEND|
            WITH_FLAG_NOATIME|
            WITH_FLAG_NODUMP|
            WITH_FLAG_DIRSYNC|
            WITH_FLAG_IMMUTABLE|
            WITH_FLAG_SYNC|
            WITH_XATTRS|
            WITH_ACL|
            WITH_SELINUX|
            WITH_FCAPS|
            WITH_QUOTA_PROJID
        },
        XFS_SUPER_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS|
            WITH_FLAG_APPEND|
            WITH_FLAG_NOATIME|
            WITH_FLAG_NODUMP|
            WITH_FLAG_IMMUTABLE|
            WITH_FLAG_SYNC|
            WITH_XATTRS|
            WITH_ACL|
            WITH_SELINUX|
            WITH_FCAPS|
            WITH_QUOTA_PROJID
        },
        ZFS_SUPER_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS|
            WITH_FLAG_APPEND|
            WITH_FLAG_NOATIME|
            WITH_FLAG_NODUMP|
            WITH_FLAG_DIRSYNC|
            WITH_FLAG_IMMUTABLE|
            WITH_FLAG_SYNC|
            WITH_XATTRS|
            WITH_ACL|
            WITH_SELINUX|
            WITH_FCAPS|
            WITH_QUOTA_PROJID
        },
        BTRFS_SUPER_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS|
            WITH_FLAG_APPEND|
            WITH_FLAG_NOATIME|
            WITH_FLAG_COMPR|
            WITH_FLAG_NOCOW|
            WITH_FLAG_NODUMP|
            WITH_FLAG_DIRSYNC|
            WITH_FLAG_IMMUTABLE|
            WITH_FLAG_SYNC|
            WITH_FLAG_NOCOMP|
            WITH_XATTRS|
            WITH_ACL|
            WITH_SELINUX|
            WITH_SUBVOLUME|
            WITH_SUBVOLUME_RO|
            WITH_FCAPS
        },
        TMPFS_MAGIC => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS|
            WITH_ACL|
            WITH_SELINUX
        },
        // FUSE mounts are special as the supported feature set
        // is not clear a priori.
        FUSE_SUPER_MAGIC => {
            WITH_FUSE
        },
        _ => {
            WITH_2SEC_TIME|
            WITH_READ_ONLY|
            WITH_PERMISSIONS|
            WITH_SYMLINKS|
            WITH_DEVICE_NODES|
            WITH_FIFOS|
            WITH_SOCKETS
        },
        /// Default feature flags for encoder/decoder
        const DEFAULT =
            Flags::WITH_SYMLINKS.bits() |
            Flags::WITH_DEVICE_NODES.bits() |
            Flags::WITH_FIFOS.bits() |
            Flags::WITH_SOCKETS.bits() |
            Flags::WITH_FLAG_HIDDEN.bits() |
            Flags::WITH_FLAG_SYSTEM.bits() |
            Flags::WITH_FLAG_ARCHIVE.bits() |
            Flags::WITH_FLAG_APPEND.bits() |
            Flags::WITH_FLAG_NOATIME.bits() |
            Flags::WITH_FLAG_COMPR.bits() |
            Flags::WITH_FLAG_NOCOW.bits() |
            //WITH_FLAG_NODUMP.bits() |
            Flags::WITH_FLAG_DIRSYNC.bits() |
            Flags::WITH_FLAG_IMMUTABLE.bits() |
            Flags::WITH_FLAG_SYNC.bits() |
            Flags::WITH_FLAG_NOCOMP.bits() |
            Flags::WITH_FLAG_PROJINHERIT.bits() |
            Flags::WITH_SUBVOLUME.bits() |
            Flags::WITH_SUBVOLUME_RO.bits() |
            Flags::WITH_XATTRS.bits() |
            Flags::WITH_ACL.bits() |
            Flags::WITH_SELINUX.bits() |
            Flags::WITH_FCAPS.bits() |
            Flags::WITH_QUOTA_PROJID.bits() |
            Flags::EXCLUDE_NODUMP.bits() |
            Flags::EXCLUDE_FILE.bits();
    }
}

impl Default for Flags {
    fn default() -> Flags {
        Flags::DEFAULT
    }
}

impl Flags {
    /// Get a set of feature flags from file attributes.
    pub fn from_chattr(attr: u32) -> Flags {
        // from /usr/include/linux/fs.h
        const FS_APPEND_FL: u32 = 0x0000_0020;
        const FS_NOATIME_FL: u32 = 0x0000_0080;
        const FS_COMPR_FL: u32 = 0x0000_0004;
        const FS_NOCOW_FL: u32 = 0x0080_0000;
        const FS_NODUMP_FL: u32 = 0x0000_0040;
        const FS_DIRSYNC_FL: u32 = 0x0001_0000;
        const FS_IMMUTABLE_FL: u32 = 0x0000_0010;
        const FS_SYNC_FL: u32 = 0x0000_0008;
        const FS_NOCOMP_FL: u32 = 0x0000_0400;
        const FS_PROJINHERIT_FL: u32 = 0x2000_0000;

        const CHATTR_MAP: [(Flags, u32); 10] = [
            ( Flags::WITH_FLAG_APPEND, FS_APPEND_FL ),
            ( Flags::WITH_FLAG_NOATIME, FS_NOATIME_FL ),
            ( Flags::WITH_FLAG_COMPR, FS_COMPR_FL ),
            ( Flags::WITH_FLAG_NOCOW, FS_NOCOW_FL ),
            ( Flags::WITH_FLAG_NODUMP, FS_NODUMP_FL ),
            ( Flags::WITH_FLAG_DIRSYNC, FS_DIRSYNC_FL ),
            ( Flags::WITH_FLAG_IMMUTABLE, FS_IMMUTABLE_FL ),
            ( Flags::WITH_FLAG_SYNC, FS_SYNC_FL ),
            ( Flags::WITH_FLAG_NOCOMP, FS_NOCOMP_FL ),
            ( Flags::WITH_FLAG_PROJINHERIT, FS_PROJINHERIT_FL ),
        ];

        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &CHATTR_MAP {
            if (attr & fs_flag) != 0 {
                flags |= *fe_flag;
            }
        }

        flags
    }

    /// Get a set of feature flags from FAT attributes.
    pub fn from_fat_attr(attr: u32) -> Flags {
        // from /usr/include/linux/msdos_fs.h
        const ATTR_HIDDEN: u32 = 2;
        const ATTR_SYS: u32 = 4;
        const ATTR_ARCH: u32 = 32;

        const FAT_ATTR_MAP: [(Flags, u32); 3] = [
            ( Flags::WITH_FLAG_HIDDEN, ATTR_HIDDEN ),
            ( Flags::WITH_FLAG_SYSTEM, ATTR_SYS ),
            ( Flags::WITH_FLAG_ARCHIVE, ATTR_ARCH ),
        ];

        let mut flags = Flags::empty();

        for (fe_flag, fs_flag) in &FAT_ATTR_MAP {
            if (attr & fs_flag) != 0 {
                flags |= *fe_flag;
            }
        }

        flags
    }

    /// Return the supported *pxar* feature flags based on the magic number of the filesystem.
    pub fn from_magic(magic: i64) -> Flags {
        use proxmox::sys::linux::magic::*;
        match magic {
            MSDOS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_FAT_ATTRS
            },
            EXT4_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            XFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            ZFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_FCAPS |
                Flags::WITH_QUOTA_PROJID
            },
            BTRFS_SUPER_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_FLAG_APPEND |
                Flags::WITH_FLAG_NOATIME |
                Flags::WITH_FLAG_COMPR |
                Flags::WITH_FLAG_NOCOW |
                Flags::WITH_FLAG_NODUMP |
                Flags::WITH_FLAG_DIRSYNC |
                Flags::WITH_FLAG_IMMUTABLE |
                Flags::WITH_FLAG_SYNC |
                Flags::WITH_FLAG_NOCOMP |
                Flags::WITH_XATTRS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX |
                Flags::WITH_SUBVOLUME |
                Flags::WITH_SUBVOLUME_RO |
                Flags::WITH_FCAPS
            },
            TMPFS_MAGIC => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS |
                Flags::WITH_ACL |
                Flags::WITH_SELINUX
            },
            // FUSE mounts are special as the supported feature set
            // is not clear a priori.
            FUSE_SUPER_MAGIC => {
                Flags::WITH_FUSE
            },
            _ => {
                Flags::WITH_2SEC_TIME |
                Flags::WITH_READ_ONLY |
                Flags::WITH_PERMISSIONS |
                Flags::WITH_SYMLINKS |
                Flags::WITH_DEVICE_NODES |
                Flags::WITH_FIFOS |
                Flags::WITH_SOCKETS
            },
        }
    }
}
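// Usage sketch (added illustration; relies only on the bitflags API generated
// above):
//
//     let flags = Flags::from_chattr(0x0000_0020 | 0x0000_0080); // APPEND + NOATIME
//     assert!(flags.contains(Flags::WITH_FLAG_APPEND | Flags::WITH_FLAG_NOATIME));
//
//     // DEFAULT leaves out WITH_FLAG_NODUMP (commented out above) but sets
//     // EXCLUDE_NODUMP, so NODUMP-flagged entries are skipped by default.
//     assert!(!Flags::DEFAULT.contains(Flags::WITH_FLAG_NODUMP));
//     assert!(Flags::DEFAULT.contains(Flags::EXCLUDE_NODUMP));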
@@ -1,263 +0,0 @@
//! *pxar* binary format definition
//!
//! Please note that all values are stored in little endian ordering.
//!
//! The archive contains a list of items. Each item starts with a
//! `PxarHeader`, followed by the item data.
use std::cmp::Ordering;

use endian_trait::Endian;
use anyhow::{bail, Error};
use siphasher::sip::SipHasher24;


/// Header types identifying items stored in the archive
pub const PXAR_ENTRY: u64 = 0x1396fabcea5bbb51;
pub const PXAR_FILENAME: u64 = 0x6dbb6ebcb3161f0b;
pub const PXAR_SYMLINK: u64 = 0x664a6fb6830e0d6c;
pub const PXAR_DEVICE: u64 = 0xac3dace369dfe643;
pub const PXAR_XATTR: u64 = 0xb8157091f80bc486;
pub const PXAR_ACL_USER: u64 = 0x297dc88b2ef12faf;
pub const PXAR_ACL_GROUP: u64 = 0x36f2acb56cb3dd0b;
pub const PXAR_ACL_GROUP_OBJ: u64 = 0x23047110441f38f3;
pub const PXAR_ACL_DEFAULT: u64 = 0xfe3eeda6823c8cd0;
pub const PXAR_ACL_DEFAULT_USER: u64 = 0xbdf03df9bd010a91;
pub const PXAR_ACL_DEFAULT_GROUP: u64 = 0xa0cb1168782d1f51;
pub const PXAR_FCAPS: u64 = 0xf7267db0afed0629;
pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b;

/// Marks item as hardlink
/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
/// Marks the beginning of the payload (actual content) of regular files
pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
/// Marks item as entry of goodbye table
pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
/// The end marker used in the GOODBYE object
pub const PXAR_GOODBYE_TAIL_MARKER: u64 = 0x57446fa533702943;

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarHeader {
    /// The item type (see `PXAR_` constants).
    pub htype: u64,
    /// The size of the item, including the size of `PxarHeader`.
    pub size: u64,
}

#[derive(Endian)]
#[repr(C)]
pub struct PxarEntry {
    pub mode: u64,
    pub flags: u64,
    pub uid: u32,
    pub gid: u32,
    pub mtime: u64,
}

#[derive(Endian)]
#[repr(C)]
pub struct PxarDevice {
    pub major: u64,
    pub minor: u64,
}

#[derive(Endian)]
#[repr(C)]
pub struct PxarGoodbyeItem {
    /// SipHash24 of the directory item name. The last GOODBYE item
    /// uses the special hash value `PXAR_GOODBYE_TAIL_MARKER`.
    pub hash: u64,
    /// The offset from the start of the GOODBYE object to the start
    /// of the matching directory item (points to a FILENAME). The last
    /// GOODBYE item points to the start of the matching ENTRY
    /// object.
    pub offset: u64,
    /// The overall size of the directory item. The last GOODBYE item
    /// repeats the size of the GOODBYE item.
    pub size: u64,
}

/// Helper function to extract file names from binary archive.
pub fn read_os_string(buffer: &[u8]) -> std::ffi::OsString {
    let len = buffer.len();

    use std::os::unix::ffi::OsStrExt;

    let name = if len > 0 && buffer[len - 1] == 0 {
        std::ffi::OsStr::from_bytes(&buffer[0..len - 1])
    } else {
        std::ffi::OsStr::from_bytes(&buffer)
    };

    name.into()
}
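// Example (added sketch): a trailing NUL byte is stripped, everything else is
// taken verbatim.
//
//     assert_eq!(read_os_string(b"foo\0"), std::ffi::OsString::from("foo"));
//     assert_eq!(read_os_string(b"foo"), std::ffi::OsString::from("foo"));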

#[derive(Debug, Eq)]
#[repr(C)]
pub struct PxarXAttr {
    pub name: Vec<u8>,
    pub value: Vec<u8>,
}

impl Ord for PxarXAttr {
    fn cmp(&self, other: &PxarXAttr) -> Ordering {
        self.name.cmp(&other.name)
    }
}

impl PartialOrd for PxarXAttr {
    fn partial_cmp(&self, other: &PxarXAttr) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarXAttr {
    fn eq(&self, other: &PxarXAttr) -> bool {
        self.name == other.name
    }
}

#[derive(Debug)]
#[repr(C)]
pub struct PxarFCaps {
    pub data: Vec<u8>,
}

#[derive(Debug, Endian, Eq)]
#[repr(C)]
pub struct PxarACLUser {
    pub uid: u64,
    pub permissions: u64,
    //pub name: Vec<u64>, not impl for now
}

// TODO: once the name is implemented as well, sort by uid, then by name and
// last by permissions
impl Ord for PxarACLUser {
    fn cmp(&self, other: &PxarACLUser) -> Ordering {
        match self.uid.cmp(&other.uid) {
            // uids are equal, entries ordered by permissions
            Ordering::Equal => self.permissions.cmp(&other.permissions),
            // uids are different, entries ordered by uid
            uid_order => uid_order,
        }
    }
}

impl PartialOrd for PxarACLUser {
    fn partial_cmp(&self, other: &PxarACLUser) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarACLUser {
    fn eq(&self, other: &PxarACLUser) -> bool {
        self.uid == other.uid && self.permissions == other.permissions
    }
}

#[derive(Debug, Endian, Eq)]
#[repr(C)]
pub struct PxarACLGroup {
    pub gid: u64,
    pub permissions: u64,
    //pub name: Vec<u64>, not impl for now
}

// TODO: once the name is implemented as well, sort by gid, then by name and
// last by permissions
impl Ord for PxarACLGroup {
    fn cmp(&self, other: &PxarACLGroup) -> Ordering {
        match self.gid.cmp(&other.gid) {
            // gids are equal, entries are ordered by permissions
            Ordering::Equal => self.permissions.cmp(&other.permissions),
            // gids are different, entries ordered by gid
            gid_ordering => gid_ordering,
        }
    }
}

impl PartialOrd for PxarACLGroup {
    fn partial_cmp(&self, other: &PxarACLGroup) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PxarACLGroup {
    fn eq(&self, other: &PxarACLGroup) -> bool {
        self.gid == other.gid && self.permissions == other.permissions
    }
}

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarACLGroupObj {
    pub permissions: u64,
}

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarACLDefault {
    pub user_obj_permissions: u64,
    pub group_obj_permissions: u64,
    pub other_permissions: u64,
    pub mask_permissions: u64,
}

pub(crate) struct PxarACL {
    pub users: Vec<PxarACLUser>,
    pub groups: Vec<PxarACLGroup>,
    pub group_obj: Option<PxarACLGroupObj>,
    pub default: Option<PxarACLDefault>,
}

pub const PXAR_ACL_PERMISSION_READ: u64 = 4;
pub const PXAR_ACL_PERMISSION_WRITE: u64 = 2;
pub const PXAR_ACL_PERMISSION_EXECUTE: u64 = 1;

#[derive(Debug, Endian)]
#[repr(C)]
pub struct PxarQuotaProjID {
    pub projid: u64,
}

#[derive(Debug, Default)]
pub struct PxarAttributes {
    pub xattrs: Vec<PxarXAttr>,
    pub fcaps: Option<PxarFCaps>,
    pub quota_projid: Option<PxarQuotaProjID>,
    pub acl_user: Vec<PxarACLUser>,
    pub acl_group: Vec<PxarACLGroup>,
    pub acl_group_obj: Option<PxarACLGroupObj>,
    pub acl_default: Option<PxarACLDefault>,
    pub acl_default_user: Vec<PxarACLUser>,
    pub acl_default_group: Vec<PxarACLGroup>,
}

/// Create SipHash values for goodbye tables.
//pub fn compute_goodbye_hash(name: &std::ffi::CStr) -> u64 {
pub fn compute_goodbye_hash(name: &[u8]) -> u64 {
    use std::hash::Hasher;
    let mut hasher = SipHasher24::new_with_keys(0x8574442b0f1d84b3, 0x2736ed30d1c22ec1);
    hasher.write(name);
    hasher.finish()
}
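// Example (added sketch): the hash is computed over the raw filename bytes
// with fixed SipHash24 keys, so equal names always produce equal goodbye
// hashes; `PXAR_FORMAT_HARDLINK` above is literally
// `compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__")`.
//
//     let a = compute_goodbye_hash(b"some.conf");
//     let b = compute_goodbye_hash(b"some.conf");
//     assert_eq!(a, b);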

pub fn check_ca_header<T>(head: &PxarHeader, htype: u64) -> Result<(), Error> {
    if head.htype != htype {
        bail!(
            "got wrong header type ({:016x} != {:016x})",
            head.htype,
            htype
        );
    }
    if head.size != (std::mem::size_of::<T>() + std::mem::size_of::<PxarHeader>()) as u64 {
        bail!("got wrong header size for type {:016x}", htype);
    }

    Ok(())
}
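// Example (added sketch): validating an ENTRY header; the expected size is
// the payload type plus the header itself.
//
//     let head = PxarHeader {
//         htype: PXAR_ENTRY,
//         size: (std::mem::size_of::<PxarEntry>() + std::mem::size_of::<PxarHeader>()) as u64,
//     };
//     assert!(check_ca_header::<PxarEntry>(&head, PXAR_ENTRY).is_ok());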

/// The format requires us to build sorted directory lookup tables in
/// memory, so we restrict the number of allowed entries to limit
/// maximum memory usage.
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
src/pxar/fuse.rs: 1428 lines changed (file diff suppressed because it is too large)
@@ -1,36 +0,0 @@
use libc;
use nix::sys::stat::FileStat;

#[inline(always)]
pub fn is_directory(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFDIR
}

#[inline(always)]
pub fn is_symlink(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFLNK
}

#[inline(always)]
pub fn is_reg_file(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFREG
}

#[inline(always)]
pub fn is_block_dev(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFBLK
}

#[inline(always)]
pub fn is_char_dev(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFCHR
}

#[inline(always)]
pub fn is_fifo(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFIFO
}
#[inline(always)]
pub fn is_socket(stat: &FileStat) -> bool {
    (stat.st_mode & libc::S_IFMT) == libc::S_IFSOCK
}
@ -1,514 +0,0 @@

//! `MatchPattern` defines a match pattern used to match filenames encountered
//! during encoding or decoding of a `pxar` archive.
//! `fnmatch` is used internally to match filenames against the patterns.
//! Shell wildcard patterns can be used to match multiple filenames; see the
//! manpage `glob(7)`.
//! `**` is treated specially, as it matches multiple directories in a path.

use std::ffi::{CStr, CString};
use std::fs::File;
use std::io::Read;
use std::os::unix::io::{FromRawFd, RawFd};

use anyhow::{bail, Error};
use libc::{c_char, c_int};
use nix::errno::Errno;
use nix::fcntl;
use nix::fcntl::{AtFlags, OFlag};
use nix::sys::stat;
use nix::sys::stat::{FileStat, Mode};
use nix::NixPath;

pub const FNM_NOMATCH: c_int = 1;

extern "C" {
    fn fnmatch(pattern: *const c_char, string: *const c_char, flags: c_int) -> c_int;
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum MatchType {
    None,
    Positive,
    Negative,
    PartialPositive,
    PartialNegative,
}

/// `MatchPattern` provides functionality for filename glob pattern matching
/// based on glibc's `fnmatch`.
/// Positive matches return `MatchType::PartialPositive` or `MatchType::Positive`.
/// Patterns starting with `!` are interpreted as negation, meaning they will
/// return `MatchType::PartialNegative` or `MatchType::Negative`.
/// No matches result in `MatchType::None`.
/// # Examples:
/// ```
/// # use std::ffi::CString;
/// # use self::proxmox_backup::pxar::{MatchPattern, MatchType};
/// # fn main() -> Result<(), anyhow::Error> {
/// let filename = CString::new("some.conf")?;
/// let is_dir = false;
///
/// /// Positive match of any file ending in `.conf` in any subdirectory
/// let positive = MatchPattern::from_line(b"**/*.conf")?.unwrap();
/// let m_positive = positive.as_slice().matches_filename(&filename, is_dir)?;
/// assert!(m_positive == MatchType::Positive);
///
/// /// Negative match of filenames starting with `s`
/// let negative = MatchPattern::from_line(b"![s]*")?.unwrap();
/// let m_negative = negative.as_slice().matches_filename(&filename, is_dir)?;
/// assert!(m_negative == MatchType::Negative);
/// # Ok(())
/// # }
/// ```
#[derive(Clone, Eq, PartialOrd)]
pub struct MatchPattern {
    pattern: Vec<u8>,
    match_positive: bool,
    match_dir_only: bool,
}

impl std::cmp::PartialEq for MatchPattern {
    fn eq(&self, other: &Self) -> bool {
        self.pattern == other.pattern
            && self.match_positive == other.match_positive
            && self.match_dir_only == other.match_dir_only
    }
}

impl std::cmp::Ord for MatchPattern {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        (&self.pattern, &self.match_positive, &self.match_dir_only)
            .cmp(&(&other.pattern, &other.match_positive, &other.match_dir_only))
    }
}

impl MatchPattern {
    /// Read a list of `MatchPattern` from file.
    /// The file is read line by line (lines terminated by newline character),
    /// each line may only contain one pattern.
    /// Leading `/` are ignored and lines starting with `#` are interpreted as
    /// comments and not included in the resulting list.
    /// Patterns ending in `/` will match only directories.
    ///
    /// On success, a list of match patterns is returned as well as the raw
    /// file byte buffer together with the file's stats.
    /// This is done in order to avoid reading the file more than once during
    /// encoding of the archive.
    pub fn from_file<P: ?Sized + NixPath>(
        parent_fd: RawFd,
        filename: &P,
    ) -> Result<Option<(Vec<MatchPattern>, Vec<u8>, FileStat)>, nix::Error> {
        let stat = match stat::fstatat(parent_fd, filename, AtFlags::AT_SYMLINK_NOFOLLOW) {
            Ok(stat) => stat,
            Err(nix::Error::Sys(Errno::ENOENT)) => return Ok(None),
            Err(err) => return Err(err),
        };

        let filefd = fcntl::openat(parent_fd, filename, OFlag::O_NOFOLLOW, Mode::empty())?;
        let mut file = unsafe { File::from_raw_fd(filefd) };

        let mut content_buffer = Vec::new();
        let _bytes = file
            .read_to_end(&mut content_buffer)
            .map_err(|_| Errno::EIO)?;

        let mut match_pattern = Vec::new();
        for line in content_buffer.split(|&c| c == b'\n') {
            if line.is_empty() {
                continue;
            }
            if let Some(pattern) = Self::from_line(line)? {
                match_pattern.push(pattern);
            }
        }

        Ok(Some((match_pattern, content_buffer, stat)))
    }

    /// Interpret a byte buffer as a single line containing a valid
    /// `MatchPattern`.
    /// Patterns starting with `#` are interpreted as comments, returning `Ok(None)`.
    /// Patterns starting with `!` are interpreted as negative match patterns.
    /// Patterns with a trailing `/` match only against directories.
    /// `.` as well as `..` and any pattern containing `\0` are invalid and will
    /// result in an error with `Errno::EINVAL`.
    pub fn from_line(line: &[u8]) -> Result<Option<MatchPattern>, nix::Error> {
        let mut input = line;

        if input.starts_with(b"#") {
            return Ok(None);
        }

        let match_positive = if input.starts_with(b"!") {
            // Reduce slice view to exclude "!"
            input = &input[1..];
            false
        } else {
            true
        };

        // Paths ending in / match only directory names (no filenames)
        let match_dir_only = if input.ends_with(b"/") {
            let len = input.len();
            input = &input[..len - 1];
            true
        } else {
            false
        };

        // Ignore initial slash
        if input.starts_with(b"/") {
            input = &input[1..];
        }

        if input.is_empty() || input == b"." || input == b".." || input.contains(&b'\0') {
            return Err(nix::Error::Sys(Errno::EINVAL));
        }

        Ok(Some(MatchPattern {
            pattern: input.to_vec(),
            match_positive,
            match_dir_only,
        }))
    }

    /// Create a `MatchPatternSlice` of the `MatchPattern` to give a view of the
    /// `MatchPattern` without copying its content.
    pub fn as_slice<'a>(&'a self) -> MatchPatternSlice<'a> {
        MatchPatternSlice {
            pattern: self.pattern.as_slice(),
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Dump the content of the `MatchPattern` to stdout.
    /// Intended for debugging purposes only.
    pub fn dump(&self) {
        match (self.match_positive, self.match_dir_only) {
            (true, true) => println!("{:#?}/", self.pattern),
            (true, false) => println!("{:#?}", self.pattern),
            (false, true) => println!("!{:#?}/", self.pattern),
            (false, false) => println!("!{:#?}", self.pattern),
        }
    }

    /// Convert a list of `MatchPattern` to bytes in order to write them to e.g.
    /// a file.
    pub fn to_bytes(patterns: &[MatchPattern]) -> Vec<u8> {
        let mut slices = Vec::new();
        for pattern in patterns {
            slices.push(pattern.as_slice());
        }

        MatchPatternSlice::to_bytes(&slices)
    }

    /// Invert the match type for this MatchPattern.
    pub fn invert(&mut self) {
        self.match_positive = !self.match_positive;
    }
}
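
// A few behavioural sketches (not part of the diff) exercising the parsing,
// serialization and matching rules documented above; all names here are
// illustrative.
#[cfg(test)]
mod sketch_tests {
    use super::*;
    use std::ffi::CString;

    #[test]
    fn from_line_examples() -> Result<(), anyhow::Error> {
        // Comment lines yield no pattern at all.
        assert!(MatchPattern::from_line(b"# just a comment")?.is_none());
        // `.` and `..` are rejected with EINVAL.
        assert!(MatchPattern::from_line(b"..").is_err());
        // A leading `!` makes the pattern a negative match.
        let negative = MatchPattern::from_line(b"!*.tmp")?.unwrap();
        let filename = CString::new("foo.tmp")?;
        assert_eq!(
            negative.as_slice().matches_filename(&filename, false)?,
            MatchType::Negative
        );
        Ok(())
    }

    #[test]
    fn dir_only_patterns_require_directories() -> Result<(), anyhow::Error> {
        // A trailing `/` restricts the pattern to directories.
        let pattern = MatchPattern::from_line(b"cache/")?.unwrap();
        let filename = CString::new("cache")?;
        assert_eq!(
            pattern.as_slice().matches_filename(&filename, true)?,
            MatchType::Positive
        );
        assert_eq!(
            pattern.as_slice().matches_filename(&filename, false)?,
            MatchType::None
        );
        Ok(())
    }

    #[test]
    fn to_bytes_roundtrips_from_line_syntax() -> Result<(), anyhow::Error> {
        // `to_bytes` writes one pattern per line in exactly the syntax
        // `from_line` parses: `!` prefix for negation, trailing `/` for
        // directory-only patterns.
        let patterns = vec![
            MatchPattern::from_line(b"!*.log")?.unwrap(),
            MatchPattern::from_line(b"cache/")?.unwrap(),
        ];
        assert_eq!(MatchPattern::to_bytes(&patterns), b"!*.log\ncache/\n".to_vec());
        Ok(())
    }
}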

#[derive(Clone)]
pub struct MatchPatternSlice<'a> {
    pattern: &'a [u8],
    match_positive: bool,
    match_dir_only: bool,
}

impl<'a> MatchPatternSlice<'a> {
    /// Returns the pattern before the first `/` encountered as `MatchPatternSlice`.
    /// If no slash is encountered, the `MatchPatternSlice` will be a copy of the
    /// original pattern.
    /// ```
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    /// let front = slice.get_front_pattern();
    /// /// ... will be the same as ...
    /// let front_pattern = MatchPattern::from_line(b"some")?.unwrap();
    /// let front_slice = front_pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_front_pattern(&'a self) -> MatchPatternSlice<'a> {
        let (front, _) = self.split_at_slash();
        MatchPatternSlice {
            pattern: front,
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Returns the pattern after the first encountered `/` as `MatchPatternSlice`.
    /// If no slash is encountered, the `MatchPatternSlice` will be empty.
    /// ```
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let pattern = MatchPattern::from_line(b"some/match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    /// let rest = slice.get_rest_pattern();
    /// /// ... will be the same as ...
    /// let rest_pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let rest_slice = rest_pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_rest_pattern(&'a self) -> MatchPatternSlice<'a> {
        let (_, rest) = self.split_at_slash();
        MatchPatternSlice {
            pattern: rest,
            match_positive: self.match_positive,
            match_dir_only: self.match_dir_only,
        }
    }

    /// Splits the `MatchPatternSlice` at the first slash encountered and returns the
    /// content before (front pattern) and after the slash (rest pattern),
    /// omitting the slash itself.
    /// Slices starting with `**/` are an exception to this, as the corresponding
    /// `MatchPattern` is intended to match multiple directories.
    /// These pattern slices therefore return a `*` as front pattern and the original
    /// pattern itself as rest pattern.
    fn split_at_slash(&'a self) -> (&'a [u8], &'a [u8]) {
        let pattern = if self.pattern.starts_with(b"./") {
            &self.pattern[2..]
        } else {
            self.pattern
        };

        let (mut front, mut rest) = match pattern.iter().position(|&c| c == b'/') {
            Some(ind) => {
                let (front, rest) = pattern.split_at(ind);
                (front, &rest[1..])
            }
            None => (pattern, &pattern[0..0]),
        };
        // '**' is treated such that it matches any directory
        if front == b"**" {
            front = b"*";
            rest = pattern;
        }

        (front, rest)
    }

    /// Convert a list of `MatchPatternSlice`s to bytes in order to write them to e.g.
    /// a file.
    pub fn to_bytes(patterns: &[MatchPatternSlice]) -> Vec<u8> {
        let mut buffer = Vec::new();
        for pattern in patterns {
            if !pattern.match_positive {
                buffer.push(b'!');
            }
            buffer.extend_from_slice(&pattern.pattern);
            if pattern.match_dir_only {
                buffer.push(b'/');
            }
            buffer.push(b'\n');
        }
        buffer
    }

    /// Match the given filename against this `MatchPatternSlice`.
    /// If the filename matches the pattern completely, `MatchType::Positive` or
    /// `MatchType::Negative` is returned, depending on whether the match pattern
    /// was declared as positive (no `!` prefix) or negative (`!` prefix).
    /// If the pattern matched only up to the first slash of the pattern,
    /// `MatchType::PartialPositive` or `MatchType::PartialNegative` is returned.
    /// If the pattern was postfixed by a trailing `/` a match is only valid if
    /// the parameter `is_dir` equals `true`.
    /// No match results in `MatchType::None`.
    pub fn matches_filename(&self, filename: &CStr, is_dir: bool) -> Result<MatchType, Error> {
        let mut res = MatchType::None;
        let (front, _) = self.split_at_slash();

        let front = CString::new(front).unwrap();
        let fnmatch_res = unsafe {
            let front_ptr = front.as_ptr() as *const libc::c_char;
            let filename_ptr = filename.as_ptr() as *const libc::c_char;
            fnmatch(front_ptr, filename_ptr, 0)
        };
        if fnmatch_res < 0 {
            bail!("error in fnmatch inside of MatchPattern");
        }
        if fnmatch_res == 0 {
            res = if self.match_positive {
                MatchType::PartialPositive
            } else {
                MatchType::PartialNegative
            };
        }

        let full = if self.pattern.starts_with(b"**/") {
            CString::new(&self.pattern[3..]).unwrap()
        } else {
            CString::new(&self.pattern[..]).unwrap()
        };
        let fnmatch_res = unsafe {
            let full_ptr = full.as_ptr() as *const libc::c_char;
            let filename_ptr = filename.as_ptr() as *const libc::c_char;
            fnmatch(full_ptr, filename_ptr, 0)
        };
        if fnmatch_res < 0 {
            bail!("error in fnmatch inside of MatchPattern");
        }
        if fnmatch_res == 0 {
            res = if self.match_positive {
                MatchType::Positive
            } else {
                MatchType::Negative
            };
        }

        if !is_dir && self.match_dir_only {
            res = MatchType::None;
        }

        if !is_dir && (res == MatchType::PartialPositive || res == MatchType::PartialNegative) {
            res = MatchType::None;
        }

        Ok(res)
    }

    /// Match the given filename against the set of `MatchPatternSlice`s.
    ///
    /// A positive match is intended to include the full subtree (unless another
    /// negative match excludes entries later).
    /// The `MatchType` together with an updated `MatchPatternSlice` list for passing
    /// to the matched child is returned.
    /// ```
    /// # use std::ffi::CString;
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let patterns = vec![
    ///     MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
    ///     MatchPattern::from_line(b"to_match/")?.unwrap()
    /// ];
    /// let mut slices = Vec::new();
    /// for pattern in &patterns {
    ///     slices.push(pattern.as_slice());
    /// }
    /// let filename = CString::new("some")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
    ///     &filename,
    ///     is_dir,
    ///     &slices
    /// )?;
    /// assert_eq!(match_type, MatchType::PartialPositive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    ///
    /// let filename = CString::new("to_match")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_include(
    ///     &filename,
    ///     is_dir,
    ///     &slices
    /// )?;
    /// assert_eq!(match_type, MatchType::Positive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"**/*")?.unwrap();
    /// let slice = pattern.as_slice();
    /// # Ok(())
    /// # }
    /// ```
    pub fn match_filename_include(
        filename: &CStr,
        is_dir: bool,
        match_pattern: &'a [MatchPatternSlice<'a>],
    ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
        let mut child_pattern = Vec::new();
        let mut match_state = MatchType::None;

        for pattern in match_pattern {
            match pattern.matches_filename(filename, is_dir)? {
                MatchType::None => continue,
                MatchType::Positive => match_state = MatchType::Positive,
                MatchType::Negative => match_state = MatchType::Negative,
                MatchType::PartialPositive => {
                    if match_state != MatchType::Negative && match_state != MatchType::Positive {
                        match_state = MatchType::PartialPositive;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
                MatchType::PartialNegative => {
                    if match_state == MatchType::PartialPositive {
                        match_state = MatchType::PartialNegative;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
            }
        }

        Ok((match_state, child_pattern))
    }

    /// Match the given filename against the set of `MatchPatternSlice`s.
    ///
    /// A positive match is intended to exclude the full subtree, independent of
    /// matches deeper down the tree.
    /// The `MatchType` together with an updated `MatchPattern` list for passing
    /// to the matched child is returned.
    /// ```
    /// # use std::ffi::CString;
    /// # use self::proxmox_backup::pxar::{MatchPattern, MatchPatternSlice, MatchType};
    /// # fn main() -> Result<(), anyhow::Error> {
    /// let patterns = vec![
    ///     MatchPattern::from_line(b"some/match/pattern/")?.unwrap(),
    ///     MatchPattern::from_line(b"to_match/")?.unwrap()
    /// ];
    /// let mut slices = Vec::new();
    /// for pattern in &patterns {
    ///     slices.push(pattern.as_slice());
    /// }
    /// let filename = CString::new("some")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
    ///     &filename,
    ///     is_dir,
    ///     &slices,
    /// )?;
    /// assert_eq!(match_type, MatchType::PartialPositive);
    /// /// child pattern will be the same as ...
    /// let pattern = MatchPattern::from_line(b"match/pattern/")?.unwrap();
    /// let slice = pattern.as_slice();
    ///
    /// let filename = CString::new("to_match")?;
    /// let is_dir = true;
    /// let (match_type, child_pattern) = MatchPatternSlice::match_filename_exclude(
    ///     &filename,
    ///     is_dir,
    ///     &slices,
    /// )?;
    /// assert_eq!(match_type, MatchType::Positive);
    /// /// child pattern will be empty
    /// # Ok(())
    /// # }
    /// ```
    pub fn match_filename_exclude(
        filename: &CStr,
        is_dir: bool,
        match_pattern: &'a [MatchPatternSlice<'a>],
    ) -> Result<(MatchType, Vec<MatchPatternSlice<'a>>), Error> {
        let mut child_pattern = Vec::new();
        let mut match_state = MatchType::None;

        for pattern in match_pattern {
            match pattern.matches_filename(filename, is_dir)? {
                MatchType::None => {}
                MatchType::Positive => match_state = MatchType::Positive,
                MatchType::Negative => match_state = MatchType::Negative,
                match_type => {
                    if match_state != MatchType::Positive && match_state != MatchType::Negative {
                        match_state = match_type;
                    }
                    child_pattern.push(pattern.get_rest_pattern());
                }
            }
        }

        Ok((match_state, child_pattern))
    }
}
319	src/pxar/metadata.rs
Normal file
@ -0,0 +1,319 @@

use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};

use anyhow::{bail, format_err, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use pxar::Metadata;

use proxmox::c_result;
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;

use crate::pxar::tools::perms_from_metadata;
use crate::pxar::Flags;
use crate::tools::{acl, fs, xattr};

//
// utility functions
//

fn allow_notsupp<E: SysError>(err: E) -> Result<(), E> {
    if err.is_errno(Errno::EOPNOTSUPP) {
        Ok(())
    } else {
        Err(err)
    }
}

fn allow_notsupp_remember<E: SysError>(err: E, not_supp: &mut bool) -> Result<(), E> {
    if err.is_errno(Errno::EOPNOTSUPP) {
        *not_supp = true;
        Ok(())
    } else {
        Err(err)
    }
}
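
// Sketch of the error-handling convention these helpers support (it mirrors
// the calls in `apply` below): syscall results wrapped via `c_result!` map
// EOPNOTSUPP to "skip silently", while every other error propagates.
// `path`, `uid` and `gid` are illustrative parameters.
fn example_chown(path: &CStr, uid: libc::uid_t, gid: libc::gid_t) -> Result<(), Error> {
    c_result!(unsafe { libc::chown(path.as_ptr(), uid, gid) })
        .map(drop)
        .or_else(allow_notsupp)?;
    Ok(())
}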

fn nsec_to_update_timespec(mtime_nsec: u64) -> [libc::timespec; 2] {
    // restore mtime
    const UTIME_OMIT: i64 = (1 << 30) - 2;
    const NANOS_PER_SEC: i64 = 1_000_000_000;

    let sec = (mtime_nsec as i64) / NANOS_PER_SEC;
    let nsec = (mtime_nsec as i64) % NANOS_PER_SEC;

    let times: [libc::timespec; 2] = [
        libc::timespec {
            tv_sec: 0,
            tv_nsec: UTIME_OMIT,
        },
        libc::timespec {
            tv_sec: sec,
            tv_nsec: nsec,
        },
    ];

    times
}
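
// Quick sketch of the conversion above: utimensat(2) takes [atime, mtime];
// the UTIME_OMIT sentinel in the first slot leaves atime untouched, and the
// archive's nanosecond mtime splits into whole seconds plus remainder.
#[test]
fn mtime_timespec_split() {
    let times = nsec_to_update_timespec(1_500_000_000); // 1.5s past the epoch
    assert_eq!(times[1].tv_sec, 1);
    assert_eq!(times[1].tv_nsec, 500_000_000);
}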

//
// metadata application:
//

pub fn apply_at(
    flags: Flags,
    metadata: &Metadata,
    parent: RawFd,
    file_name: &CStr,
) -> Result<(), Error> {
    let fd = proxmox::tools::fd::Fd::openat(
        &unsafe { RawFdNum::from_raw_fd(parent) },
        file_name,
        OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
        Mode::empty(),
    )?;

    apply(flags, metadata, fd.as_raw_fd(), file_name)
}
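
// Hedged usage sketch: restoring archived metadata onto a freshly created
// entry relative to an open directory fd. `dir_fd` and `metadata` would come
// from the extraction context; `Flags::DEFAULT` stands in for whatever
// feature-flag set the caller selected.
fn restore_one(dir_fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
    let name = CString::new("restored.file")?;
    apply_at(Flags::DEFAULT, metadata, dir_fd, &name)
}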

pub fn apply(flags: Flags, metadata: &Metadata, fd: RawFd, file_name: &CStr) -> Result<(), Error> {
    let c_proc_path = CString::new(format!("/proc/self/fd/{}", fd)).unwrap();

    if metadata.stat.flags != 0 {
        todo!("apply flags!");
    }

    unsafe {
        // UID and GID first, as this fails if we lose access anyway.
        c_result!(libc::chown(
            c_proc_path.as_ptr(),
            metadata.stat.uid,
            metadata.stat.gid
        ))
        .map(drop)
        .or_else(allow_notsupp)?;
    }

    let mut skip_xattrs = false;
    apply_xattrs(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs)?;
    apply_acls(flags, &c_proc_path, metadata)?;
    apply_quota_project_id(flags, fd, metadata)?;

    // Finally mode and time. We may lose access with mode, but changing
    // the mode also affects times.
    if !metadata.is_symlink() {
        c_result!(unsafe {
            libc::chmod(c_proc_path.as_ptr(), perms_from_metadata(metadata)?.bits())
        })
        .map(drop)
        .or_else(allow_notsupp)?;
    }

    let res = c_result!(unsafe {
        libc::utimensat(
            libc::AT_FDCWD,
            c_proc_path.as_ptr(),
            nsec_to_update_timespec(metadata.stat.mtime).as_ptr(),
            0,
        )
    });
    match res {
        Ok(_) => (),
        Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
        Err(ref err) if err.is_errno(Errno::EPERM) => {
            println!(
                "failed to restore mtime attribute on {:?}: {}",
                file_name, err
            );
        }
        Err(err) => return Err(err.into()),
    }

    Ok(())
}

fn add_fcaps(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_FCAPS) {
        return Ok(());
    }
    let fcaps = match metadata.fcaps.as_ref() {
        Some(fcaps) => fcaps,
        None => return Ok(()),
    };

    c_result!(unsafe {
        libc::setxattr(
            c_proc_path,
            xattr::xattr_name_fcaps().as_ptr(),
            fcaps.data.as_ptr() as *const libc::c_void,
            fcaps.data.len(),
            0,
        )
    })
    .map(drop)
    .or_else(|err| allow_notsupp_remember(err, skip_xattrs))?;

    Ok(())
}

fn apply_xattrs(
    flags: Flags,
    c_proc_path: *const libc::c_char,
    metadata: &Metadata,
    skip_xattrs: &mut bool,
) -> Result<(), Error> {
    if *skip_xattrs || !flags.contains(Flags::WITH_XATTRS) {
        return Ok(());
    }

    for xattr in &metadata.xattrs {
        if *skip_xattrs {
            return Ok(());
        }

        if !xattr::is_valid_xattr_name(xattr.name()) {
            println!("skipping invalid xattr named {:?}", xattr.name());
            continue;
        }

        c_result!(unsafe {
            libc::setxattr(
                c_proc_path,
                xattr.name().as_ptr() as *const libc::c_char,
                xattr.value().as_ptr() as *const libc::c_void,
                xattr.value().len(),
                0,
            )
        })
        .map(drop)
        .or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))?;
    }

    Ok(())
}

fn apply_acls(flags: Flags, c_proc_path: &CStr, metadata: &Metadata) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_ACL) || metadata.acl.is_empty() {
        return Ok(());
    }

    let mut acl = acl::ACL::init(5)?;

    // acl type access:
    acl.add_entry_full(
        acl::ACL_USER_OBJ,
        None,
        acl::mode_user_to_acl_permissions(metadata.stat.mode),
    )?;

    acl.add_entry_full(
        acl::ACL_OTHER,
        None,
        acl::mode_other_to_acl_permissions(metadata.stat.mode),
    )?;

    match metadata.acl.group_obj.as_ref() {
        Some(group_obj) => {
            acl.add_entry_full(
                acl::ACL_MASK,
                None,
                acl::mode_group_to_acl_permissions(metadata.stat.mode),
            )?;
            acl.add_entry_full(acl::ACL_GROUP_OBJ, None, group_obj.permissions.0)?;
        }
        None => {
            acl.add_entry_full(
                acl::ACL_GROUP_OBJ,
                None,
                acl::mode_group_to_acl_permissions(metadata.stat.mode),
            )?;
        }
    }

    for user in &metadata.acl.users {
        acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
    }

    for group in &metadata.acl.groups {
        acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
    }

    if !acl.is_valid() {
        bail!("Error while restoring ACL - ACL invalid");
    }

    acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
    drop(acl);

    // acl type default:
    if let Some(default) = metadata.acl.default.as_ref() {
        let mut acl = acl::ACL::init(5)?;

        acl.add_entry_full(acl::ACL_USER_OBJ, None, default.user_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_GROUP_OBJ, None, default.group_obj_permissions.0)?;

        acl.add_entry_full(acl::ACL_OTHER, None, default.other_permissions.0)?;

        if default.mask_permissions != pxar::format::acl::Permissions::NO_MASK {
            acl.add_entry_full(acl::ACL_MASK, None, default.mask_permissions.0)?;
        }

        for user in &metadata.acl.default_users {
            acl.add_entry_full(acl::ACL_USER, Some(user.uid), user.permissions.0)?;
        }

        for group in &metadata.acl.default_groups {
            acl.add_entry_full(acl::ACL_GROUP, Some(group.gid), group.permissions.0)?;
        }

        if !acl.is_valid() {
            bail!("Error while restoring ACL - ACL invalid");
        }

        acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;
    }

    Ok(())
}
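
// Sketch-level commentary (not from the diff) on the group_obj branch above:
// a POSIX ACL only carries an ACL_MASK entry when extended entries exist, so
// the group bits of `st_mode` map to ACL_MASK when a separate GROUP_OBJ
// entry is present, and to ACL_GROUP_OBJ itself otherwise.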

fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
        return Ok(());
    }

    let projid = match metadata.quota_project_id {
        Some(projid) => projid,
        None => return Ok(()),
    };

    let mut fsxattr = fs::FSXAttr::default();
    unsafe {
        fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
            format_err!(
                "error while getting fsxattr to restore quota project id - {}",
                err
            )
        })?;

        fsxattr.fsx_projid = projid.projid as u32;

        fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
            format_err!(
                "error while setting fsxattr to restore quota project id - {}",
                err
            )
        })?;
    }

    Ok(())
}
@ -4,7 +4,7 @@
 //! format used in the [casync](https://github.com/systemd/casync)
 //! toolkit (we are not 100\% binary compatible). It is a file archive
 //! format defined by 'Lennart Poettering', specially defined for
-//! efficent deduplication.
+//! efficient deduplication.

 //! Every archive contains items in the following order:
 //! * `ENTRY` -- containing general stat() data and related bits
@ -47,33 +47,23 @@
 //! (user, group, acl, ...) because this is already defined by the
 //! linked `ENTRY`.

-mod binary_search_tree;
-pub use binary_search_tree::*;
-
-pub mod flags;
-pub use flags::*;
-
-mod format_definition;
-pub use format_definition::*;
-
-mod encoder;
-pub use encoder::*;
-
-mod sequential_decoder;
-pub use sequential_decoder::*;
-
-mod decoder;
-pub use decoder::*;
-
-mod match_pattern;
-pub use match_pattern::*;
-
-mod dir_stack;
-pub use dir_stack::*;
-
-pub mod fuse;
-pub use fuse::*;
-
 pub mod catalog;
+pub(crate) mod create;
+pub(crate) mod dir_stack;
+pub(crate) mod extract;
+pub(crate) mod metadata;
+pub mod fuse;
+pub(crate) mod tools;

-mod helper;
+mod flags;
+pub use flags::Flags;
+
+pub use create::create_archive;
+pub use extract::extract_archive;
+
+/// The format requires building sorted directory lookup tables in
+/// memory, so we restrict the number of allowed entries to limit
+/// maximum memory usage.
+pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;
+
+pub use tools::{format_multi_line_entry, format_single_line_entry};
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff