Compare commits
281 Commits
SHA1 | Author | Date | |
---|---|---|---|
d16ed66c88 | |||
3ec6e249b3 | |||
dfa517ad6c | |||
8b2ad84a25 | |||
3dacedce71 | |||
512d50a455 | |||
b53f637914 | |||
152a926149 | |||
7f388acea8 | |||
b2bfb46835 | |||
24406ebc0c | |||
1f24d9114c | |||
859fe9c1fb | |||
2107a5aebc | |||
3638341aa4 | |||
067fe514e6 | |||
8c6e5ce23c | |||
0351f23ba4 | |||
c1ff544eff | |||
69e5d71961 | |||
48e22a8900 | |||
a7a5f56daa | |||
05389a0109 | |||
b65390ebc9 | |||
3bad3e6e52 | |||
24be37e3f6 | |||
1008a69a13 | |||
521a0acb2e | |||
3b66040de6 | |||
af3a0ae7b1 | |||
4e36f78438 | |||
f28d9088ed | |||
56b814e378 | |||
0c136efe30 | |||
cdead6cd12 | |||
c950826e46 | |||
f91d58e157 | |||
1ff840ffad | |||
7443a6e092 | |||
3a9988638b | |||
96ee857752 | |||
887018bb79 | |||
9696f5193b | |||
e13c4f66bb | |||
8a25809573 | |||
d87b193b0b | |||
ea5289e869 | |||
1f6a4f587a | |||
705b2293ec | |||
d2c7ef09ba | |||
27f86f997e | |||
fc93d38076 | |||
a5a85d41ff | |||
08cb2038bd | |||
6f711c1737 | |||
42ec9f577f | |||
9de69cdb1a | |||
bd260569d3 | |||
36cb4b30ef | |||
4e717240bf | |||
e9764238df | |||
26f499b17b | |||
cc7995ac40 | |||
43abba4b4f | |||
58f950c546 | |||
c426e65893 | |||
caea8d611f | |||
7d0754a6d2 | |||
5afa0755ea | |||
40b63186a6 | |||
8f6088c130 | |||
2162e2c15d | |||
0d5ab04a90 | |||
4059285649 | |||
2e079b8bf2 | |||
4ff2c9b832 | |||
a8e2940ff3 | |||
d5d5f2174e | |||
2311238450 | |||
2ea501ffdf | |||
4eb4e94918 | |||
817bcda848 | |||
f6de2c7359 | |||
3f0b9c10ec | |||
2b66abbfab | |||
402c8861d8 | |||
3f683799a8 | |||
573bcd9a92 | |||
90779237ae | |||
1f82f9b7b5 | |||
19b5c3c43e | |||
fe3e65c3ea | |||
fdaab0df4e | |||
b957aa81bd | |||
8ea00f6e49 | |||
4bd789b0fa | |||
2f050cf2ed | |||
e22f4882e7 | |||
c65bc99a41 | |||
355c055e81 | |||
c2009e5309 | |||
23f74c190e | |||
a6f8728339 | |||
c1769a749c | |||
facd9801cf | |||
21302088de | |||
8268c9d161 | |||
b91b7d9ffd | |||
6e1f0c138f | |||
8567c0d29c | |||
d33d8f4e6a | |||
5b1cfa01f1 | |||
05d18b907a | |||
e44fe0c9f5 | |||
4cf0ced950 | |||
98425309b0 | |||
7b1e26699d | |||
676b0fde49 | |||
60f9a6ea8f | |||
1090fd4424 | |||
92c3fd2e22 | |||
e3efaa1972 | |||
0cf2b6441e | |||
d6d3b353be | |||
a67f7d0a07 | |||
c8137518fe | |||
cbef49bf4f | |||
0b99e5aebc | |||
29c55e5fc4 | |||
f386f512d0 | |||
3ddb14889a | |||
00c2327564 | |||
d79926795a | |||
c08fac4d69 | |||
c40440092d | |||
dc2ef2b54f | |||
b28253d650 | |||
f28cfb322a | |||
3bbe291c51 | |||
42d19fdf69 | |||
215968e033 | |||
eddd1a1b9c | |||
d2ce211899 | |||
1cb46c6f65 | |||
5d88c3a1c8 | |||
07fb504943 | |||
f675c5e978 | |||
4e37d9ce67 | |||
e303077132 | |||
6ef9bb59eb | |||
eeaa2c212b | |||
4a3adc3de8 | |||
abdb976340 | |||
3b62116ce6 | |||
e005f953d9 | |||
1c090810f5 | |||
e181d2f6da | |||
16021f6ab7 | |||
ba694720fc | |||
bde8e243cf | |||
3352ee5656 | |||
b29cbc414d | |||
026dc1d11f | |||
9438aca6c9 | |||
547f0c97e4 | |||
177a2de992 | |||
0686b1f4db | |||
0727e56a06 | |||
2fd3d57490 | |||
3f851d1321 | |||
1aef491e24 | |||
d0eccae37d | |||
a34154d900 | |||
c2cc32b4dd | |||
46405fa35d | |||
66af7f51bc | |||
c72ccd4e33 | |||
902b2cc278 | |||
8ecd7c9c21 | |||
7f17f7444a | |||
fb5a066500 | |||
d19c96d507 | |||
929a13b357 | |||
36c65ee0b0 | |||
3378fd9fe5 | |||
58c51cf3d9 | |||
5509b199fb | |||
bb59df9134 | |||
2564b0834f | |||
9321bbd1f5 | |||
4264e52220 | |||
6988b29bdc | |||
98c54240e6 | |||
d30c192589 | |||
67908b47fa | |||
ac7513e368 | |||
fbbcd85839 | |||
7a6b549270 | |||
0196b9bf5b | |||
739a51459a | |||
195d7c90ce | |||
6f3146c08c | |||
4b12879289 | |||
20b3094bcb | |||
df528ee6fa | |||
57e50fb906 | |||
3136792c95 | |||
3d571d5509 | |||
8e6e18b77c | |||
4d16badf6f | |||
a609cf210e | |||
1498659b4e | |||
4482f3fe11 | |||
5d85847f91 | |||
476b4acadc | |||
cf1bd08131 | |||
ec8f042459 | |||
431cc7b185 | |||
e693818afc | |||
3d68536fc2 | |||
26e78a2efb | |||
5444fa940b | |||
d4f2397d4c | |||
fab2413741 | |||
669c137fec | |||
fc6047fcb1 | |||
3014088684 | |||
144006fade | |||
b9cf6ee797 | |||
cdde66d277 | |||
239e49f927 | |||
ae66873ce9 | |||
bda48e04da | |||
ba97479848 | |||
6cad8ce4ce | |||
34020b929e | |||
33070956af | |||
da84cc52f4 | |||
9825748e5e | |||
2179359f40 | |||
9bb161c881 | |||
297e600730 | |||
ed7b3a7de2 | |||
0f358204bd | |||
ca6124d5fa | |||
7eacdc765b | |||
c443f58b09 | |||
ab1092392f | |||
1e3d9b103d | |||
386990ba09 | |||
bc853b028f | |||
d406de299b | |||
dfb31de8f0 | |||
7c3aa258f8 | |||
044055062c | |||
2b388026f8 | |||
707974fdb3 | |||
9069debcd8 | |||
fa2bdc1309 | |||
8e40aa63c1 | |||
d2522b2db6 | |||
ce8e3de401 | |||
7fa2779559 | |||
042afd6e52 | |||
ff30caeaf8 | |||
553cd12ba6 | |||
de1e1a9d95 | |||
91960d6162 | |||
4c24a48eb3 | |||
484e761dab | |||
059b7a252e | |||
1278aeec36 | |||
e53a4c4577 | |||
98ad58fbd2 | |||
98bb3b9016 | |||
eb80aac288 | |||
c26aad405f | |||
f03a0e509e | |||
4c1e8855cc | |||
85a9a5b68c | |||
f856e0774e |
Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "proxmox-backup"
version = "0.2.3"
version = "0.8.2"
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
edition = "2018"
license = "AGPL-3"

@@ -30,15 +30,20 @@ lazy_static = "1.4"
libc = "0.2"
log = "0.4"
nix = "0.16"
num-traits = "0.2"
once_cell = "1.3.1"
openssl = "0.10"
pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-utils = "0.1.0"
proxmox = { version = "0.1.38", features = [ "sortable-macro", "api-macro" ] }
pathpatterns = "0.1.1"
proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
proxmox-fuse = "0.1.0"
pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
regex = "1.2"
rustyline = "6"
serde = { version = "1.0", features = ["derive"] }

Makefile (12 changed lines)

@@ -37,11 +37,15 @@ CARGO ?= cargo

COMPILED_BINS := \
    $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))

export DEB_VERSION DEB_VERSION_UPSTREAM

SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb

DEBS=${SERVER_DEB} ${CLIENT_DEB}
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}

DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc

@@ -56,7 +60,7 @@ $(SUBDIRS):

test:
    #cargo test test_broadcast_future
    #cargo test $(CARGO_BUILD_ARGS)
    $(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
    #$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)

doc:
    $(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)

@@ -140,5 +144,5 @@ install: $(COMPILED_BINS)

upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
    # check if working directory is clean
    git diff --exit-code --stat && git diff --exit-code --stat --staged
    tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
    tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster

TODO.rst (2 changed lines)

@@ -30,8 +30,6 @@ Chores:

* move tools/xattr.rs and tools/acl.rs to proxmox/sys/linux/

* recompute PXAR_ header types from strings: avoid using numbers from casync

* remove pbs-* systemd timers and services on package purge

debian/changelog (vendored, 92 changed lines)

@@ -1,3 +1,95 @@

rust-proxmox-backup (0.8.2-1) unstable; urgency=medium

  * buildsys: also upload debug packages

  * src/backup/manifest.rs: rename into_string -> to_string

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 11:58:51 +0200

rust-proxmox-backup (0.8.1-1) unstable; urgency=medium

  * remove authhenticated data blobs (not needed)

  * add signature to manifest

  * improve docs

  * client: introduce --keyfd parameter

  * ui improvements

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 10:01:25 +0200

rust-proxmox-backup (0.8.0-1) unstable; urgency=medium

  * implement get_runtime_with_builder

 -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 10:15:26 +0200

rust-proxmox-backup (0.7.0-1) unstable; urgency=medium

  * implement clone for RemoteChunkReader

  * improve docs

  * client: add --encryption boolen parameter

  * client: use default encryption key if it is available

  * d/rules: do not compress .pdf files

  * ui: various fixes

  * add beta text with link to bugtracker

 -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 07:40:05 +0200

rust-proxmox-backup (0.6.0-1) unstable; urgency=medium

  * make ReadChunk not require mutable self.

  * ui: increase timeout for snapshot listing

  * ui: consistently spell Datastore without space between words

  * ui: disk create: sync and improve 'add-datastore' checkbox label

  * proxmox-backup-client: add benchmark command

  * pxar: fixup 'vanished-file' logic a bit

  * ui: add verify button

 -- Proxmox Support Team <support@proxmox.com>  Fri, 03 Jul 2020 09:45:52 +0200

rust-proxmox-backup (0.5.0-1) unstable; urgency=medium

  * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000

  * ui: allow to Forget (delete) backup snapshots

  * pxar: deal with files changing size during archiving

 -- Proxmox Support Team <support@proxmox.com>  Mon, 29 Jun 2020 13:00:54 +0200

rust-proxmox-backup (0.4.0-1) unstable; urgency=medium

  * change api for incremental backups mode

  * zfs disk management gui

 -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Jun 2020 10:43:27 +0200

rust-proxmox-backup (0.3.0-1) unstable; urgency=medium

  * support incremental backups mode

  * new disk management

  * single file restore for container backups

 -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Jun 2020 10:12:57 +0200

rust-proxmox-backup (0.2.3-1) unstable; urgency=medium

  * tools/systemd/time: fix compute_next_event for weekdays

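The `DEB_VERSION` and `DEB_VERSION_UPSTREAM` values exported for the docs build (see the Makefile hunk above) are derived from this changelog. How the top-level Makefile computes them is not part of the diff; a hedged sketch using the standard Debian tooling:

```sh
# print the full Debian version (e.g. 0.8.2-1) from the newest changelog entry
dpkg-parsechangelog -S Version

# strip the Debian revision to get the upstream part (e.g. 0.8.2)
dpkg-parsechangelog -S Version | sed 's/-[^-]*$//'
```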
debian/control.in (vendored, 3 changed lines)

@@ -3,11 +3,14 @@ Architecture: any
Depends: fonts-font-awesome,
         libjs-extjs (>= 6.0.1),
         libzstd1 (>= 1.3.8),
         lvm2,
         proxmox-backup-docs,
         proxmox-mini-journalreader,
         proxmox-widget-toolkit (>= 2.2-4),
         smartmontools,
         ${misc:Depends},
         ${shlibs:Depends},
Recommends: zfsutils-linux,
Description: Proxmox Backup Server daemon with tools and GUI
 This package contains the Proxmox Backup Server daemons and related
 tools. This includes a web-based graphical user interface.

debian/lintian-overrides (new file, vendored, 1 line)

@@ -0,0 +1 @@
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list

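The override exists because the server package intentionally ships an APT source for the beta repository, which lintian would otherwise flag with exactly this tag. A quick way to check a built package against the override (the file name is only an example):

```sh
lintian proxmox-backup-server_0.8.2-1_amd64.deb
```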
debian/postinst (new file, vendored, 28 lines)

@@ -0,0 +1,28 @@
#!/bin/sh

set -e

#DEBHELPER#

case "$1" in
  configure)
    # modeled after dh_systemd_start output
    systemctl --system daemon-reload >/dev/null || true
    if [ -n "$2" ]; then
      _dh_action=try-reload-or-restart
    else
      _dh_action=start
    fi
    deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
  ;;

  abort-upgrade|abort-remove|abort-deconfigure)
  ;;

  *)
    echo "postinst called with unknown argument \`$1'" >&2
    exit 1
  ;;
esac

exit 0

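A quick way to confirm that the new maintainer script actually started or reloaded the daemons after installation (not part of the diff, just a verification step):

```sh
systemctl status proxmox-backup.service proxmox-backup-proxy.service
```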
debian/prerm (new file, vendored, 10 lines)

@@ -0,0 +1,10 @@
#!/bin/sh

set -e

#DEBHELPER#

# modeled after dh_systemd_start output
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
    deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
fi

debian/proxmox-backup-docs.link (new file, vendored, 1 line)

@@ -0,0 +1 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/docs/proxmox-backup.pdf

debian/proxmox-backup-server.install (vendored, 1 changed line)

@@ -1,6 +1,7 @@
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/proxmox-backup-banner.service /lib/systemd/system/
etc/pbstest-beta.list /etc/apt/sources.list.d/
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner

debian/rules (vendored, 9 changed lines)

@@ -37,11 +37,14 @@ override_dh_auto_install:
        PROXY_USER=backup \
        LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)

override_dh_installinit:
    dh_installinit
    dh_installinit --name proxmox-backup-proxy
override_dh_installsystemd:
    # note: we start/try-reload-restart services manually in postinst
    dh_installsystemd --no-start --no-restart-after-upgrade

# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
# TODO: remove once available (Debian 11 ?)
override_dh_dwz:
    dh_dwz --no-dwz-multifile

override_dh_compress:
    dh_compress -X.pdf

docs/Makefile

@@ -1,11 +1,5 @@
include ../defines.mk

ifeq ($(BUILD_MODE), release)
COMPILEDIR := ../target/release
else
COMPILEDIR := ../target/debug
endif

GENERATED_SYNOPSIS := \
    proxmox-backup-client/synopsis.rst \
    proxmox-backup-client/catalog-shell-synopsis.rst \
@@ -26,6 +20,15 @@ SPHINXOPTS =
SPHINXBUILD = sphinx-build
BUILDDIR = output

ifeq ($(BUILD_MODE), release)
COMPILEDIR := ../target/release
SPHINXOPTS += -t release
else
COMPILEDIR := ../target/debug
SPHINXOPTS += -t devbuild
endif


# Sphinx internal variables.
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .

docs/administration-guide.rst

@@ -1,9 +1,8 @@
|
||||
Administration Guide
|
||||
====================
|
||||
Backup Management
|
||||
=================
|
||||
|
||||
The administration guide.
|
||||
|
||||
.. todo:: either add a bit more explanation or remove the previous sentence
|
||||
.. The administration guide.
|
||||
.. todo:: either add a bit more explanation or remove the previous sentence
|
||||
|
||||
Terminology
|
||||
-----------
|
||||
@ -13,16 +12,16 @@ Backup Content
|
||||
|
||||
When doing deduplication, there are different strategies to get
|
||||
optimal results in terms of performance and/or deduplication rates.
|
||||
Depending on the type of data, one can split data into *fixed* or *variable*
|
||||
Depending on the type of data, it can be split into *fixed* or *variable*
|
||||
sized chunks.
|
||||
|
||||
Fixed sized chunking needs almost no CPU performance, and is used to
|
||||
Fixed sized chunking requires minimal CPU power, and is used to
|
||||
backup virtual machine images.
|
||||
|
||||
Variable sized chunking needs more CPU power, but is essential to get
|
||||
good deduplication rates for file archives.
|
||||
|
||||
The backup server supports both strategies.
|
||||
The Proxmox Backup Server supports both strategies.
|
||||
|
||||
|
||||
File Archives: ``<name>.pxar``
|
||||
@ -31,7 +30,7 @@ File Archives: ``<name>.pxar``
|
||||
.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
|
||||
|
||||
A file archive stores a full directory tree. Content is stored using
|
||||
the :ref:`pxar-format`, split into variable sized chunks. The format
|
||||
the :ref:`pxar-format`, split into variable-sized chunks. The format
|
||||
is optimized to achieve good deduplication rates.
|
||||
|
||||
|
||||
@ -39,7 +38,7 @@ Image Archives: ``<name>.img``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This is used for virtual machine images and other large binary
|
||||
data. Content is split into fixed sized chunks.
|
||||
data. Content is split into fixed-sized chunks.
|
||||
|
||||
|
||||
Binary Data (BLOBs)
|
||||
@ -56,7 +55,7 @@ Catalog File: ``catalog.pcat1``
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The catalog file is an index for file archives. It contains
|
||||
the list of files and is used to speed-up search operations.
|
||||
the list of files and is used to speed up search operations.
|
||||
|
||||
|
||||
The Manifest: ``index.json``
|
||||
@ -74,12 +73,12 @@ The backup server groups backups by *type*, where *type* is one of:
|
||||
|
||||
``vm``
|
||||
This type is used for :term:`virtual machine`\ s. Typically
|
||||
contains the virtual machine's configuration and an image archive
|
||||
consists of the virtual machine's configuration file and an image archive
|
||||
for each disk.
|
||||
|
||||
``ct``
|
||||
This type is used for :term:`container`\ s. Contains the container's
|
||||
configuration and a single file archive for the container content.
|
||||
This type is used for :term:`container`\ s. Consists of the container's
|
||||
configuration and a single file archive for the filesystem content.
|
||||
|
||||
``host``
|
||||
This type is used for backups created from within the backed up machine.
|
||||
@ -90,7 +89,7 @@ The backup server groups backups by *type*, where *type* is one of:
|
||||
Backup ID
|
||||
~~~~~~~~~
|
||||
|
||||
An unique ID. Usually the virtual machine or container ID. ``host``
|
||||
A unique ID. Usually the virtual machine or container ID. ``host``
|
||||
type backups normally use the hostname.
|
||||
|
||||
|
||||
@ -122,6 +121,13 @@ uniquely identifies a specific backup within a datastore.
|
||||
As you can see, the time format is RFC3399_ with Coordinated
|
||||
Universal Time (UTC_, identified by the trailing *Z*).
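Put together, type, ID and time form the snapshot path used throughout the client and the API; the values below are made-up examples, not taken from this diff:

.. code-block:: console

   vm/100/2020-07-09T11:58:51Z
   host/backup-host01/2020-07-08T19:26:08Z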
|
||||
|
||||
Backup Server Management
|
||||
------------------------
|
||||
|
||||
The command line tool to configure and manage the backup server is called
|
||||
:command:`proxmox-backup-manager`.
|
||||
|
||||
|
||||
|
||||
:term:`DataStore`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
@ -134,20 +140,13 @@ Datastores are identified by a simple *ID*. You can configure it
|
||||
when setting up the backup server.
|
||||
|
||||
|
||||
Backup Server Management
|
||||
------------------------
|
||||
|
||||
The command line tool to configure and manage the backup server is called
|
||||
:command:`proxmox-backup-manager`.
|
||||
|
||||
|
||||
Datastore Configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
A :term:`datastore` is a place to store backups. You can configure
|
||||
multiple datastores. At least one datastore needs to be
|
||||
configured. The datastore is identified by a simple `name` and points
|
||||
to a directory.
|
||||
You can configure multiple datastores. Minimum one datastore needs to be
|
||||
configured. The datastore is identified by a simple `name` and points to a
|
||||
directory on the filesystem.
|
||||
|
||||
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
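The command block itself is unchanged and therefore not shown in this hunk; for reference, a sketch of the standard invocation (name and path as in the sentence above):

.. code-block:: console

   # proxmox-backup-manager datastore create store1 /backup/disk1/store1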
|
||||
|
||||
@ -179,17 +178,58 @@ Finally, it is possible to remove the datastore configuration:
|
||||
File Layout
|
||||
^^^^^^^^^^^
|
||||
|
||||
.. todo:: Add datastore file layout example
|
||||
After creating a datastore, the following default layout will appear:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ls -arilh /backup/disk1/store1
|
||||
276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
|
||||
276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks
|
||||
|
||||
`.lock` is an empty file used for process locking.
|
||||
|
||||
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
|
||||
directories will store the chunked data after a backup operation has been executed.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# ls -arilh /backup/disk1/store1/.chunks
|
||||
545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
|
||||
545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
|
||||
415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
|
||||
415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
|
||||
353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
|
||||
344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
|
||||
144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
|
||||
144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
|
||||
144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
|
||||
...
|
||||
403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
|
||||
403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
|
||||
403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
|
||||
402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
|
||||
402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
|
||||
402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
|
||||
276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
|
||||
276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
|
||||
276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
|
||||
276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
|
||||
276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
|
||||
276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
|
||||
276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
|
||||
276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
|
||||
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
||||
|
||||
|
||||
|
||||
User Management
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Proxmox Backup support several authentication realms, and you need to
|
||||
Proxmox Backup Server supports several authentication realms, and you need to
|
||||
choose the realm when you add a new user. Possible realms are:
|
||||
|
||||
:pam: Linux PAM standard authentication. Use this if you want to
|
||||
authenticate as Linux system user (Users needs to exist on the
|
||||
authenticate as Linux system user (Users need to exist on the
|
||||
system).
|
||||
|
||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||
@ -216,8 +256,8 @@ normally want to add other users with less privileges:
|
||||
|
||||
# proxmox-backup-manager user create john@pbs --email john@example.com
|
||||
|
||||
The create command lets you specify many option like ``--email`` or
|
||||
``--password``, but you can update or change any of them using the
|
||||
The create command lets you specify many options like ``--email`` or
|
||||
``--password``. You can update or change any of them using the
|
||||
update command later:
|
||||
|
||||
.. code-block:: console
|
||||
@ -225,11 +265,10 @@ update command later:
|
||||
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
|
||||
# proxmox-backup-manager user update john@pbs --comment "An example user."
|
||||
|
||||
|
||||
.. todo:: Mention how to set password without passing plaintext password as cli argument.
|
||||
|
||||
|
||||
The resulting use list looks like this:
|
||||
The resulting user list looks like this:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -242,16 +281,16 @@ The resulting use list looks like this:
|
||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||
|
||||
Newly created users do not have an permissions. Please read the next
|
||||
Newly created users do not have any permissions. Please read the next
|
||||
section to learn how to set access permissions.
|
||||
|
||||
If you want to disable an user account, you can do that by setting ``--enable`` to ``0``
|
||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager user update john@pbs --enable 0
|
||||
|
||||
Or completely remove the users with:
|
||||
Or completely remove the user with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -261,20 +300,20 @@ Or completely remove the users with:
|
||||
Access Control
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Users do not have any permission by default. Instead you need to
|
||||
specify what is allowed and what not. You can do this by assigning
|
||||
By default new users do not have any permission. Instead you need to
|
||||
specify what is allowed and what is not. You can do this by assigning
|
||||
roles to users on specific objects like datastores or remotes. The
|
||||
following roles exist:
|
||||
|
||||
**NoAccess**
|
||||
Disable Access - nothing is allowed.
|
||||
|
||||
**Admin**
|
||||
The Administrator can do anything.
|
||||
|
||||
**Audit**
|
||||
An Auditor can view things, but is not allowed to change settings.
|
||||
|
||||
**NoAccess**
|
||||
Disable Access - nothing is allowed.
|
||||
|
||||
**DatastoreAdmin**
|
||||
Can do anything on datastores.
|
||||
|
||||
@ -301,7 +340,6 @@ following roles exist:
|
||||
Is allowed to read data from a remote.
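Assigning one of these roles to a user on an object is done with ``proxmox-backup-manager``; the exact option names below are an assumption and may differ between versions:

.. code-block:: console

   # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
   # proxmox-backup-manager acl list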
|
||||
|
||||
|
||||
|
||||
Backup Client usage
|
||||
-------------------
|
||||
|
||||
@ -316,8 +354,8 @@ on the backup server.
|
||||
|
||||
[[username@]server:]datastore
|
||||
|
||||
The default value for ``username`` ist ``root``. If no server is specified, the
|
||||
default is the local host (``localhost``).
|
||||
The default value for ``username`` ist ``root``. If no server is specified,
|
||||
the default is the local host (``localhost``).
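Some example repository values (host, user and datastore names are illustrative):

.. code-block:: console

   store1                          # datastore "store1" on localhost
   backup-server:store1            # datastore "store1" on host "backup-server"
   john@pbs@backup-server:store1   # same, but authenticated as user john@pbs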
|
||||
|
||||
You can pass the repository with the ``--repository`` command
|
||||
line option, or by setting the ``PBS_REPOSITORY`` environment
|
||||
@ -381,7 +419,7 @@ This section explains how to create a backup from within the machine. This can
|
||||
be a physical host, a virtual machine, or a container. Such backups may contain file
|
||||
and image archives. There are no restrictions in this case.
|
||||
|
||||
.. note:: If you want to backup virtual machines or containers on Proxmov VE, see :ref:`pve-integration`.
|
||||
.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.
|
||||
|
||||
For the following example you need to have a backup server set up, working
|
||||
credentials and need to know the repository name.
|
||||
@ -896,7 +934,3 @@ After that you should be able to see storage status with:
|
||||
.. include:: command-line-tools.rst
|
||||
|
||||
.. include:: services.rst
|
||||
|
||||
.. include host system admin at the end
|
||||
|
||||
.. include:: sysadmin.rst
|
||||
|
docs/conf.py (13 changed lines)

@@ -17,7 +17,7 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

@@ -45,8 +45,11 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.

extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]

todo_link_only = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

@@ -76,9 +79,11 @@ author = 'Proxmox Support Team'
# built documents.
#
# The short X.Y version.
version = '0.2'
vstr = lambda s: '<devbuild>' if s is None else str(s)

version = vstr(os.getenv('DEB_VERSION_UPSTREAM'))
# The full version, including alpha/beta/rc tags.
release = '0.2-1'
release = vstr(os.getenv('DEB_VERSION'))

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -107,7 +112,7 @@ exclude_patterns = [
    'pxar/man1.rst',
    'epilog.rst',
    'pbs-copyright.rst',
    'sysadmin.rst',
    'local-zfs.rst'
    'package-repositories.rst',
]

docs/epilog.rst

@@ -11,8 +11,10 @@
.. _Container: https://en.wikipedia.org/wiki/Container_(virtualization)
.. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Rust: https://www.rust-lang.org/
.. _SHA-256: https://en.wikipedia.org/wiki/SHA-2

docs/index.rst

@@ -1,18 +1,15 @@
.. Proxmox Backup documentation master file

Welcome to Proxmox Backup's documentation!
==========================================
Welcome to the Proxmox Backup documentation!
============================================

Copyright (C) 2019 Proxmox Server Solutions GmbH
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH

Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with no
Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A
copy of the license is included in the section entitled "GNU Free
Documentation License".

.. todolist::
Permission is granted to copy, distribute and/or modify this document under the
terms of the GNU Free Documentation License, Version 1.3 or any later version
published by the Free Software Foundation; with no Invariant Sections, no
Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
in the section entitled "GNU Free Documentation License".


.. toctree::

@@ -22,6 +19,7 @@ Documentation License".
   introduction.rst
   installation.rst
   administration-guide.rst
   sysadmin.rst

.. raw:: latex

@@ -37,5 +35,14 @@ Documentation License".
   glossary.rst
   GFDL.rst

.. only:: html and devbuild

   .. toctree::
      :maxdepth: 2
      :caption: Developer Appendix

      todos.rst


* :ref:`genindex`

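Taken together, the Makefile, conf.py and index.rst changes make the documentation build mode explicit. A sketch of the two build flavours (paths and version values are examples, not taken from the diff):

```sh
# development build: ".. only:: devbuild" sections such as the new
# Developer Appendix are included; the version falls back to '<devbuild>'
sphinx-build -t devbuild -d output/doctrees . output/html

# release build: the package build exports DEB_VERSION/DEB_VERSION_UPSTREAM,
# which conf.py now reads instead of hard-coded version strings
DEB_VERSION=0.8.2-1 DEB_VERSION_UPSTREAM=0.8.2 \
    sphinx-build -t release -d output/doctrees . output/html
```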
docs/introduction.rst

@@ -1,101 +1,102 @@
|
||||
Introduction
|
||||
============
|
||||
|
||||
This documentation is written in :term:`reStructuredText` and formatted with :term:`Sphinx`.
|
||||
What is Proxmox Backup Server
|
||||
-----------------------------
|
||||
|
||||
Proxmox Backup Server is an enterprise-class client-server backup software that
|
||||
backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
|
||||
It is specially optimized for the `Proxmox Virtual Environment`_ platform and
|
||||
allows you to backup your data securely, even between remote sites, providing
|
||||
easy management with a web-based user interface.
|
||||
|
||||
What is Proxmox Backup
|
||||
----------------------
|
||||
|
||||
Proxmox Backup is an enterprise class client-server backup software,
|
||||
specially optimized for the `Proxmox Virtual Environment`_ to backup
|
||||
:term:`virtual machine`\ s and :term:`container`\ s. It is also
|
||||
possible to backup physical hosts.
|
||||
|
||||
It supports deduplication, compression and authenticated encryption
|
||||
(AE_). Using :term:`Rust` as implementation language guarantees high
|
||||
Proxmox Backup Server supports deduplication, compression, and authenticated
|
||||
encryption (AE_). Using :term:`Rust` as implementation language guarantees high
|
||||
performance, low resource usage, and a safe, high quality code base.
|
||||
|
||||
Encryption is done at the client side. This makes backups to not fully
|
||||
trusted targets possible.
|
||||
It features strong encryption done on the client side. Thus, it's possible to
|
||||
backup data to not fully trusted targets.
|
||||
|
||||
|
||||
Architecture
|
||||
------------
|
||||
|
||||
Proxmox Backup uses a `Client-server model`_. The server is
|
||||
responsible to store the backup data and provides an API to create
|
||||
backups and restore data. It is possible to manage disks and
|
||||
other server side resources using this API.
|
||||
Proxmox Backup Server uses a `client-server model`_. The server stores the
|
||||
backup data and provides an API to create backups and restore data. With the
|
||||
API it's also possible to manage disks and other server side resources.
|
||||
|
||||
A backup client uses this API to access the backed up data,
|
||||
i.e. ``proxmox-backup-client`` is a command line tool to create
|
||||
backups and restore data. We deliver an integrated client for
|
||||
QEMU_ with `Proxmox Virtual Environment`_.
|
||||
The backup client uses this API to access the backed up data. With the command
|
||||
line tool ``proxmox-backup-client`` you can create backups and restore data.
|
||||
For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.
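As a small, illustrative client invocation (repository and archive names are examples, not taken from this diff):

.. code-block:: console

   # back up the host's root file system as a file (pxar) archive
   # proxmox-backup-client backup root.pxar:/ --repository backup-server:store1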
|
||||
|
||||
A single backup is allowed to contain several archives. For example,
|
||||
when you backup a :term:`virtual machine`, each disk is stored as a
|
||||
separate archive inside that backup. The VM configuration also gets an
|
||||
extra file. This way, it is easy to access and restore important parts
|
||||
of the backup without having to scan the whole backup.
|
||||
A single backup is allowed to contain several archives. For example, when you
|
||||
backup a :term:`virtual machine`, each disk is stored as a separate archive
|
||||
inside that backup. The VM configuration itself is stored as an extra file.
|
||||
This way, it is easy to access and restore only important parts of the backup
|
||||
without the need to scan the whole backup.
|
||||
|
||||
|
||||
Main Features
|
||||
-------------
|
||||
|
||||
:Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
||||
supported. You can backup :term:`virtual machine`\ s and
|
||||
:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
|
||||
supported and you can easily backup :term:`virtual machine`\ s and
|
||||
:term:`container`\ s.
|
||||
|
||||
:GUI: We provide a graphical, web based user interface.
|
||||
:Performance: The whole software stack is written in :term:`Rust`,
|
||||
to provide high speed and memory efficiency.
|
||||
|
||||
:Deduplication: Incremental backups produce large amounts of duplicate
|
||||
data. The deduplication layer removes that redundancy and makes
|
||||
incremental backups small and space efficient.
|
||||
:Deduplication: Periodic backups produce large amounts of duplicate
|
||||
data. The deduplication layer avoids redundancy and minimizes the used
|
||||
storage space.
|
||||
|
||||
:Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
|
||||
:Incremental backups: Changes between backups are typically low. Reading and
|
||||
sending only the delta reduces storage and network impact of backups.
|
||||
|
||||
:Data Integrity: The built-in `SHA-256`_ checksum algorithm assures the
|
||||
accuracy and consistency of your backups.
|
||||
|
||||
:Remote Sync: It is possible to efficiently synchronize data to remote
|
||||
sites. Only deltas containing new data are transferred.
|
||||
|
||||
:Performance: The whole software stack is written in :term:`Rust`,
|
||||
to provide high speed and memory efficiency.
|
||||
|
||||
:Compression: Ultra fast Zstandard_ compression is able to compress
|
||||
:Compression: The ultra fast Zstandard_ compression is able to compress
|
||||
several gigabytes of data per second.
|
||||
|
||||
:Encryption: Backups can be encrypted client-side using AES-256 in
|
||||
GCM_ mode. This authenticated encryption mode (AE_) provides very
|
||||
high performance on modern hardware.
|
||||
:Encryption: Backups can be encrypted on the client-side using AES-256 in
|
||||
Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mde
|
||||
provides very high performance on modern hardware.
|
||||
|
||||
:Open Source: No secrets. You have access to all the source code.
|
||||
:Web interface: Manage the Proxmox Backup Server with the integrated web-based
|
||||
user interface.
|
||||
|
||||
:Support: Commercial support options are available from `Proxmox`_.
|
||||
:Open Source: No secrets. Proxmox Backup Server is free and open-source
|
||||
software. The source code is licensed under AGPL, v3.
|
||||
|
||||
:Support: Enterprise support will be available from `Proxmox`_ once the beta
|
||||
phase is over.
|
||||
|
||||
|
||||
Why Backup?
|
||||
-----------
|
||||
Reasons for Data Backup?
|
||||
------------------------
|
||||
|
||||
The primary purpose of a backup is to protect against data loss. Data
|
||||
loss can be caused by faulty hardware, but also by human error.
|
||||
The main purpose of a backup is to protect against data loss. Data loss can be
|
||||
caused by faulty hardware but also by human error.
|
||||
|
||||
A common mistake is to delete a file or folder which is still
|
||||
required. Virtualization can amplify this problem. It is now
|
||||
easy to delete a whole virtual machine by pressing a single button.
|
||||
A common mistake is to accidentally delete a file or folder which is still
|
||||
required. Virtualization can even amplify this problem; it easily happens that
|
||||
a whole virtual machine is deleted by just pressing a single button.
|
||||
|
||||
Backups can serve as a toolkit for administrators to temporarily
|
||||
store data. For example, it is common practice to perform full backups
|
||||
before installing major software updates. If something goes wrong, you
|
||||
can restore the previous state.
|
||||
For administrators, backups can serve as a useful toolkit for temporarily
|
||||
storing data. For example, it is common practice to perform full backups before
|
||||
installing major software updates. If something goes wrong, you can easily
|
||||
restore the previous state.
|
||||
|
||||
Another reason for backups are legal requirements. Some data must be
|
||||
kept in a safe place for several years by law, so that it can be accessed if
|
||||
required.
|
||||
Another reason for backups are legal requirements. Some data, especially
|
||||
business records, must be kept in a safe place for several years by law, so
|
||||
that they can be accessed if required.
|
||||
|
||||
Data loss can be very costly as it can severely restrict your
|
||||
business. Therefore, make sure that you perform a backup regularly
|
||||
and run restore tests.
|
||||
In general, data loss is very costly as it can severely damage your business.
|
||||
Therefore, ensure that you perform regular backups and run restore tests.
|
||||
|
||||
|
||||
Software Stack
|
||||
@ -104,17 +105,43 @@ Software Stack
|
||||
.. todo:: Eplain why we use Rust (and Flutter)
|
||||
|
||||
|
||||
Getting Help
|
||||
------------
|
||||
|
||||
Community Support Forum
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
We always encourage our users to discuss and share their knowledge using the
|
||||
`Proxmox Community Forum`_. The forum is moderated by the Proxmox support team.
|
||||
The large user base is spread out all over the world. Needless to say that such
|
||||
a large forum is a great place to get information.
|
||||
|
||||
Mailing Lists
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Proxmox Backup Server is fully open-source and contributions are welcome! Here
|
||||
is the primary communication channel for developers:
|
||||
:Mailing list for developers: `PBS Development List`_
|
||||
|
||||
Bug Tracker
|
||||
~~~~~~~~~~~
|
||||
|
||||
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
||||
issue appears, file your report there. An issue can be a bug as well as a
|
||||
request for a new feature or enhancement. The bug tracker helps to keep track
|
||||
of the issue and will send a notification once it has been solved.
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright (C) 2019 Proxmox Server Solutions GmbH
|
||||
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||
|
||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
|
||||
Proxmox Backup is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as
|
||||
published by the Free Software Foundation, either version 3 of the
|
||||
License, or (at your option) any later version.
|
||||
Proxmox Backup Server is free and open source software: you can use it,
|
||||
redistribute it, and/or modify it under the terms of the GNU Affero General
|
||||
Public License as published by the Free Software Foundation, either version 3
|
||||
of the License, or (at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
``WITHOUT ANY WARRANTY``; without even the implied warranty of
|
||||
|
docs/local-zfs.rst (new file, 401 lines)

@@ -0,0 +1,401 @@
|
||||
ZFS on Linux
|
||||
------------
|
||||
|
||||
ZFS is a combined file system and logical volume manager designed by
|
||||
Sun Microsystems. There is no need to manually compile ZFS modules - all
|
||||
packages are included.
|
||||
|
||||
By using ZFS, it's possible to achieve maximum enterprise features with
|
||||
low budget hardware, but also high performance systems by leveraging
|
||||
SSD caching or even SSD only setups. ZFS can replace cost intense
|
||||
hardware raid cards by moderate CPU and memory load combined with easy
|
||||
management.
|
||||
|
||||
General ZFS advantages
|
||||
|
||||
* Easy configuration and management with GUI and CLI.
|
||||
* Reliable
|
||||
* Protection against data corruption
|
||||
* Data compression on file system level
|
||||
* Snapshots
|
||||
* Copy-on-write clone
|
||||
* Various raid levels: RAID0, RAID1, RAID10, RAIDZ-1, RAIDZ-2 and RAIDZ-3
|
||||
* Can use SSD for cache
|
||||
* Self healing
|
||||
* Continuous integrity checking
|
||||
* Designed for high storage capacities
|
||||
* Protection against data corruption
|
||||
* Asynchronous replication over network
|
||||
* Open Source
|
||||
* Encryption
|
||||
|
||||
Hardware
|
||||
~~~~~~~~~
|
||||
|
||||
ZFS depends heavily on memory, so you need at least 8GB to start. In
|
||||
practice, use as much you can get for your hardware/budget. To prevent
|
||||
data corruption, we recommend the use of high quality ECC RAM.
|
||||
|
||||
If you use a dedicated cache and/or log disk, you should use an
|
||||
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
|
||||
increase the overall performance significantly.
|
||||
|
||||
IMPORTANT: Do not use ZFS on top of hardware controller which has its
|
||||
own cache management. ZFS needs to directly communicate with disks. An
|
||||
HBA adapter is the way to go, or something like LSI controller flashed
|
||||
in ``IT`` mode.
|
||||
|
||||
|
||||
ZFS Administration
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section gives you some usage examples for common tasks. ZFS
|
||||
itself is really powerful and provides many options. The main commands
|
||||
to manage ZFS are `zfs` and `zpool`. Both commands come with great
|
||||
manual pages, which can be read with:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# man zpool
|
||||
# man zfs
|
||||
|
||||
Create a new zpool
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To create a new pool, at least one disk is needed. The `ashift` should
|
||||
have the same sector-size (2 power of `ashift`) or larger as the
|
||||
underlying disk.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> <device>
|
||||
|
||||
Create a new pool with RAID-0
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Minimum 1 disk
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> <device1> <device2>
|
||||
|
||||
Create a new pool with RAID-1
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Minimum 2 disks
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> mirror <device1> <device2>
|
||||
|
||||
Create a new pool with RAID-10
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Minimum 4 disks
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> mirror <device1> <device2> mirror <device3> <device4>
|
||||
|
||||
Create a new pool with RAIDZ-1
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Minimum 3 disks
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> raidz1 <device1> <device2> <device3>
|
||||
|
||||
Create a new pool with RAIDZ-2
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Minimum 4 disks
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> raidz2 <device1> <device2> <device3> <device4>
|
||||
|
||||
Create a new pool with cache (L2ARC)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is possible to use a dedicated cache drive partition to increase
|
||||
the performance (use SSD).
|
||||
|
||||
As `<device>` it is possible to use more devices, like it's shown in
|
||||
"Create a new pool with RAID*".
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> <device> cache <cache_device>
|
||||
|
||||
Create a new pool with log (ZIL)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is possible to use a dedicated cache drive partition to increase
|
||||
the performance (SSD).
|
||||
|
||||
As `<device>` it is possible to use more devices, like it's shown in
|
||||
"Create a new pool with RAID*".
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> <device> log <log_device>
|
||||
|
||||
Add cache and log to an existing pool
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you have a pool without cache and log. First partition the SSD in
|
||||
2 partition with `parted` or `gdisk`
|
||||
|
||||
.. important:: Always use GPT partition tables.
|
||||
|
||||
The maximum size of a log device should be about half the size of
|
||||
physical memory, so this is usually quite small. The rest of the SSD
|
||||
can be used as cache.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool add -f <pool> log <device-part1> cache <device-part2>
|
||||
|
||||
|
||||
Changing a failed device
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool replace -f <pool> <old device> <new device>
|
||||
|
||||
|
||||
Changing a failed bootable device
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Depending on how Proxmox Backup was installed it is either using `grub` or `systemd-boot`
|
||||
as bootloader.
|
||||
|
||||
The first steps of copying the partition table, reissuing GUIDs and replacing
|
||||
the ZFS partition are the same. To make the system bootable from the new disk,
|
||||
different steps are needed which depend on the bootloader in use.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sgdisk <healthy bootable device> -R <new device>
|
||||
# sgdisk -G <new device>
|
||||
# zpool replace -f <pool> <old zfs partition> <new zfs partition>
|
||||
|
||||
.. NOTE:: Use the `zpool status -v` command to monitor how far the resilvering process of the new disk has progressed.
|
||||
|
||||
With `systemd-boot`:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# pve-efiboot-tool format <new disk's ESP>
|
||||
# pve-efiboot-tool init <new disk's ESP>
|
||||
|
||||
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
|
||||
bootable disks setup by the {pve} installer since version 5.4. For details, see
|
||||
xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].
|
||||
|
||||
With `grub`:
|
||||
|
||||
Usually `grub.cfg` is located in `/boot/grub/grub.cfg`
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# grub-install <new disk>
|
||||
# grub-mkconfig -o /path/to/grub.cfg
|
||||
|
||||
|
||||
Activate E-Mail Notification
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
ZFS comes with an event daemon, which monitors events generated by the
|
||||
ZFS kernel module. The daemon can also send emails on ZFS events like
|
||||
pool errors. Newer ZFS packages ship the daemon in a separate package,
|
||||
and you can install it using `apt-get`:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# apt-get install zfs-zed
|
||||
|
||||
To activate the daemon it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
|
||||
favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
ZED_EMAIL_ADDR="root"
|
||||
|
||||
Please note Proxmox Backup forwards mails to `root` to the email address
|
||||
configured for the root user.
|
||||
|
||||
IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
|
||||
other settings are optional.
|
||||
|
||||
Limit ZFS Memory Usage
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
It is good to use at most 50 percent (which is the default) of the
|
||||
system memory for ZFS ARC to prevent performance shortage of the
|
||||
host. Use your preferred editor to change the configuration in
|
||||
`/etc/modprobe.d/zfs.conf` and insert:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
options zfs zfs_arc_max=8589934592
|
||||
|
||||
This example setting limits the usage to 8GB.
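The value is given in bytes, so other limits are easy to derive; for example, 16 GiB:

.. code-block:: console

   # echo $((16 * 1024*1024*1024))
   17179869184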
|
||||
|
||||
.. IMPORTANT:: If your root file system is ZFS you must update your initramfs every time this value changes:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# update-initramfs -u
|
||||
|
||||
|
||||
SWAP on ZFS
|
||||
^^^^^^^^^^^
|
||||
|
||||
Swap-space created on a zvol may generate some troubles, like blocking the
|
||||
server or generating a high IO load, often seen when starting a Backup
|
||||
to an external Storage.
|
||||
|
||||
We strongly recommend to use enough memory, so that you normally do not
|
||||
run into low memory situations. Should you need or want to add swap, it is
|
||||
preferred to create a partition on a physical disk and use it as swapdevice.
|
||||
You can leave some space free for this purpose in the advanced options of the
|
||||
installer. Additionally, you can lower the `swappiness` value.
|
||||
A good value for servers is 10:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sysctl -w vm.swappiness=10
|
||||
|
||||
To make the swappiness persistent, open `/etc/sysctl.conf` with
|
||||
an editor of your choice and add the following line:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
vm.swappiness = 10
|
||||
|
||||
.. table:: Linux kernel `swappiness` parameter values
|
||||
:widths:auto
|
||||
|
||||
==================== ===============================================================
|
||||
Value Strategy
|
||||
==================== ===============================================================
|
||||
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
|
||||
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
|
||||
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
|
||||
vm.swappiness = 60 The default value.
|
||||
vm.swappiness = 100 The kernel will swap aggressively.
|
||||
==================== ===============================================================
|
||||
|
||||
ZFS Compression
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
To activate compression:
|
||||
.. code-block:: console
|
||||
|
||||
# zpool set compression=lz4 <pool>
|
||||
|
||||
We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
|
||||
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer `1-9` representing
|
||||
the compression ratio, 1 is fastest and 9 is best compression) are also available.
|
||||
Depending on the algorithm and how compressible the data is, having compression enabled can even increase
|
||||
I/O performance.
|
||||
|
||||
You can disable compression at any time with:
|
||||
.. code-block:: console
|
||||
|
||||
# zfs set compression=off <dataset>
|
||||
|
||||
Only new blocks will be affected by this change.
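To see the compression setting and the ratio actually achieved on existing data, you can query the dataset:

.. code-block:: console

   # zfs get compression,compressratio <pool>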
|
||||
|
||||
ZFS Special Device
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Since version 0.8.0 ZFS supports `special` devices. A `special` device in a
|
||||
pool is used to store metadata, deduplication tables, and optionally small
|
||||
file blocks.
|
||||
|
||||
A `special` device can improve the speed of a pool consisting of slow spinning
|
||||
hard disks with a lot of metadata changes. For example workloads that involve
|
||||
creating, updating or deleting a large number of files will benefit from the
|
||||
presence of a `special` device. ZFS datasets can also be configured to store
|
||||
whole small files on the `special` device which can further improve the
|
||||
performance. Use fast SSDs for the `special` device.
|
||||
|
||||
.. IMPORTANT:: The redundancy of the `special` device should match the one of the
|
||||
pool, since the `special` device is a point of failure for the whole pool.
|
||||
|
||||
.. WARNING:: Adding a `special` device to a pool cannot be undone!
|
||||
|
||||
Create a pool with `special` device and RAID-1:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool create -f -o ashift=12 <pool> mirror <device1> <device2> special mirror <device3> <device4>
|
||||
|
||||
Adding a `special` device to an existing pool with RAID-1:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool add <pool> special mirror <device1> <device2>
|
||||
|
||||
ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
|
||||
`0` to disable storing small file blocks on the `special` device or a power of
|
||||
two in the range between `512B` to `128K`. After setting the property new file
|
||||
blocks smaller than `size` will be allocated on the `special` device.
|
||||
|
||||
.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
|
||||
the `recordsize` (default `128K`) of the dataset, *all* data will be written to
|
||||
the `special` device, so be careful!
|
||||
|
||||
Setting the `special_small_blocks` property on a pool will change the default
|
||||
value of that property for all child ZFS datasets (for example all containers
|
||||
in the pool will opt in for small file blocks).
|
||||
|
||||
Opt in for all file smaller than 4K-blocks pool-wide:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zfs set special_small_blocks=4K <pool>
|
||||
|
||||
Opt in for small file blocks for a single dataset:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zfs set special_small_blocks=4K <pool>/<filesystem>
|
||||
|
||||
Opt out from small file blocks for a single dataset:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zfs set special_small_blocks=0 <pool>/<filesystem>
|
||||
|
||||
Troubleshooting
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Corrupted cachefile
|
||||
|
||||
In case of a corrupted ZFS cachefile, some volumes may not be mounted during
|
||||
boot until mounted manually later.
|
||||
|
||||
For each pool, run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zpool set cachefile=/etc/zfs/zpool.cache POOLNAME
|
||||
|
||||
and afterwards update the `initramfs` by running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# update-initramfs -u -k all
|
||||
|
||||
and finally reboot your node.
|
||||
|
||||
Sometimes the ZFS cachefile can get corrupted, in which case `zfs-import-cache.service`
|
||||
doesn't import pools that aren't present in the cachefile.
|
||||
|
||||
Another workaround for this problem is enabling the `zfs-import-scan.service`,
|
||||
which searches and imports pools via device scanning (usually slower).
|
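A minimal sketch of enabling that service with systemd (assuming the unit is shipped
by your ZFS packages under this name):

.. code-block:: console

# systemctl enable --now zfs-import-scan.service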
@ -3,17 +3,16 @@
|
||||
Debian Package Repositories
|
||||
---------------------------
|
||||
|
||||
All Debian based systems use APT_ as package
|
||||
management tool. The list of repositories is defined in
|
||||
``/etc/apt/sources.list`` and ``.list`` files found in the
|
||||
``/etc/apt/sources.d/`` directory. Updates can be installed directly with
|
||||
the ``apt`` command line tool, or via the GUI.
|
||||
All Debian-based systems use APT_ as their package management tool. The list of
|
||||
repositories is defined in ``/etc/apt/sources.list`` and ``.list`` files found
|
||||
in the ``/etc/apt/sources.list.d/`` directory. Updates can be installed directly
|
||||
with the ``apt`` command line tool, or via the GUI.
|
||||
|
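For example, from the command line an update is typically a two-step process
(shown here only as an illustration of the ``apt`` workflow):

.. code-block:: console

# apt update
# apt dist-upgrade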
||||
APT_ ``sources.list`` files list one package repository per line, with
|
||||
the most preferred source listed first. Empty lines are ignored and a
|
||||
``#`` character anywhere on a line marks the remainder of that line as a
|
||||
comment. The information available from the configured sources is
|
||||
acquired by ``apt update``.
|
||||
APT_ ``sources.list`` files list one package repository per line, with the most
|
||||
preferred source listed first. Empty lines are ignored and a ``#`` character
|
||||
anywhere on a line marks the remainder of that line as a comment. The
|
||||
information available from the configured sources is acquired by ``apt
|
||||
update``.
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list``
|
||||
@ -27,76 +26,87 @@ acquired by ``apt update``.
|
||||
|
||||
.. FIXME for 7.0: change security update suite to bullseye-security
|
||||
|
||||
In addition, Proxmox provides three different package repositories for
|
||||
the backup server binaries.
|
||||
In addition, you need a package repository from Proxmox to get the backup
|
||||
server updates.
|
||||
|
||||
`Proxmox Backup`_ Enterprise Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
During the Proxmox Backup beta phase, only one repository (pbstest) will be
|
||||
available. Once released, an Enterprise repository for production use and a
|
||||
no-subscription repository will be provided.
|
||||
|
||||
This is the default, stable, and recommended repository. It is available for
|
||||
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
||||
and is suitable for production use. The ``pbs-enterprise`` repository is
|
||||
enabled by default:
|
||||
.. comment
|
||||
`Proxmox Backup`_ Enterprise Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||
This will be the default, stable, and recommended repository. It is available for
|
||||
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
||||
and is suitable for production use. The ``pbs-enterprise`` repository is
|
||||
enabled by default:
|
||||
|
||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
||||
.. note:: During the Proxmox Backup beta phase only one repository (pbstest)
|
||||
will be available.
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||
|
||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
||||
|
||||
|
||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||
notified via email about new packages as soon as they are available. The
|
||||
change-log and details of each package can be viewed in the GUI (if available).
|
||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||
notified via email about new packages as soon as they are available. The
|
||||
change-log and details of each package can be viewed in the GUI (if available).
|
||||
|
||||
Please note that you need a valid subscription key to access this
|
||||
repository. More information regarding subscription levels and pricing can be
|
||||
found at https://www.proxmox.com/en/proxmox-backup/pricing.
|
||||
Please note that you need a valid subscription key to access this
|
||||
repository. More information regarding subscription levels and pricing can be
|
||||
found at https://www.proxmox.com/en/proxmox-backup/pricing.
|
||||
|
||||
.. note:: You can disable this repository by commenting out the above
|
||||
line using a `#` (at the start of the line). This prevents error
|
||||
messages if you do not have a subscription key. Please configure the
|
||||
``pbs-no-subscription`` repository in that case.
|
||||
.. note:: You can disable this repository by commenting out the above
|
||||
line using a `#` (at the start of the line). This prevents error
|
||||
messages if you do not have a subscription key. Please configure the
|
||||
``pbs-no-subscription`` repository in that case.
|
||||
|
||||
|
||||
`Proxmox Backup`_ No-Subscription Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
`Proxmox Backup`_ No-Subscription Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
As the name suggests, you do not need a subscription key to access
|
||||
this repository. It can be used for testing and non-production
|
||||
use. It is not recommended to use it on production servers, because these
|
||||
packages are not always heavily tested and validated.
|
||||
As the name suggests, you do not need a subscription key to access
|
||||
this repository. It can be used for testing and non-production
|
||||
use. It is not recommended to use it on production servers, because these
|
||||
packages are not always heavily tested and validated.
|
||||
|
||||
We recommend configuring this repository in ``/etc/apt/sources.list``.
|
||||
We recommend configuring this repository in ``/etc/apt/sources.list``.
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list``
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list``
|
||||
|
||||
deb http://ftp.debian.org/debian buster main contrib
|
||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
||||
deb http://ftp.debian.org/debian buster main contrib
|
||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
||||
|
||||
# PBS pbs-no-subscription repository provided by proxmox.com,
|
||||
# NOT recommended for production use
|
||||
deb http://download.proxmox.com/debian/bps buster pbs-no-subscription
|
||||
# PBS pbs-no-subscription repository provided by proxmox.com,
|
||||
# NOT recommended for production use
|
||||
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
|
||||
|
||||
# security updates
|
||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
||||
# security updates
|
||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
||||
|
||||
|
||||
`Proxmox Backup`_ Test Repository
|
||||
`Proxmox Backup`_ Beta Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Finally, there is a repository called ``pbstest``. This one contains the
|
||||
latest packages and is heavily used by developers to test new
|
||||
During the public beta, there is a repository called ``pbstest``. This one
|
||||
contains the latest packages and is heavily used by developers to test new
|
||||
features.
|
||||
|
||||
.. warning:: the ``pbstest`` repository should (as the name implies)
|
||||
.. .. warning:: the ``pbstest`` repository should (as the name implies)
|
||||
only be used to test new features or bug fixes.
|
||||
|
||||
You can configure this using ``/etc/apt/sources.list`` by
|
||||
adding the following line:
|
||||
You can configure this using ``/etc/apt/sources.list`` by adding the following
|
||||
line:
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: sources.list entry for ``pbstest``
|
||||
|
||||
deb http://download.proxmox.com/debian/bps buster pbstest
|
||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
||||
|
||||
If you installed Proxmox Backup Server from the official beta ISO, you should
|
||||
have this repository already configured in
|
||||
``/etc/apt/sources.list.d/pbstest-beta.list``.
|
||||
|
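To verify this, you can, for example, print the file; it should contain a single entry
like the one shipped in ``etc/pbstest-beta.list``:

.. code-block:: console

# cat /etc/apt/sources.list.d/pbstest-beta.list
deb http://download.proxmox.com/debian/pbs buster pbstest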
@ -1,5 +1,5 @@
|
||||
Host System Administration
|
||||
--------------------------
|
||||
==========================
|
||||
|
||||
`Proxmox Backup`_ is based on the famous Debian_ Linux
|
||||
distribution. That means that you have access to the whole world of
|
||||
@ -23,8 +23,4 @@ either explain things which are different on `Proxmox Backup`_, or
|
||||
tasks which are commonly used on `Proxmox Backup`_. For other topics,
|
||||
please refer to the standard Debian documentation.
|
||||
|
||||
ZFS
|
||||
~~~
|
||||
|
||||
.. todo:: Add local ZFS admin guide (local.zfs.adoc)
|
||||
|
||||
.. include:: local-zfs.rst
|
||||
|
6
docs/todos.rst
Normal file
@ -0,0 +1,6 @@
|
||||
Documentation Todo List
|
||||
=======================
|
||||
|
||||
This is an auto-generated list of the todo references in the documentation.
|
||||
|
||||
.. todolist::
|
@ -7,7 +7,7 @@ DYNAMIC_UNITS := \
|
||||
proxmox-backup.service \
|
||||
proxmox-backup-proxy.service
|
||||
|
||||
all: $(UNITS) $(DYNAMIC_UNITS)
|
||||
all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
|
||||
|
||||
clean:
|
||||
rm -f $(DYNAMIC_UNITS)
|
||||
|
1
etc/pbstest-beta.list
Normal file
@ -0,0 +1 @@
|
||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
@ -2,7 +2,7 @@
|
||||
Description=Proxmox Backup API Proxy Server
|
||||
Wants=network-online.target
|
||||
After=network.target
|
||||
Requires=proxmox-backup.service
|
||||
Wants=proxmox-backup.service
|
||||
After=proxmox-backup.service
|
||||
|
||||
[Service]
|
||||
|
@ -44,8 +44,8 @@ async fn run() -> Result<(), Error> {
|
||||
|
||||
let mut bytes = 0;
|
||||
for _ in 0..100 {
|
||||
let writer = DummyWriter { bytes: 0 };
|
||||
let writer = client.speedtest(writer).await?;
|
||||
let mut writer = DummyWriter { bytes: 0 };
|
||||
client.speedtest(&mut writer).await?;
|
||||
println!("Received {} bytes", writer.bytes);
|
||||
bytes += writer.bytes;
|
||||
}
|
||||
@ -59,8 +59,7 @@ async fn run() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
fn main() {
|
||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
||||
eprintln!("ERROR: {}", err);
|
||||
}
|
@ -17,7 +17,7 @@ async fn upload_speed() -> Result<usize, Error> {
|
||||
|
||||
let backup_time = chrono::Utc::now();
|
||||
|
||||
let client = BackupWriter::start(client, datastore, "host", "speedtest", backup_time, false).await?;
|
||||
let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
|
||||
|
||||
println!("start upload speed test");
|
||||
let res = client.upload_speedtest().await?;
|
@ -5,9 +5,11 @@ pub mod config;
|
||||
pub mod node;
|
||||
pub mod reader;
|
||||
mod subscription;
|
||||
pub mod status;
|
||||
pub mod types;
|
||||
pub mod version;
|
||||
pub mod pull;
|
||||
mod helpers;
|
||||
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::Router;
|
||||
@ -23,6 +25,7 @@ pub const SUBDIRS: SubdirMap = &[
|
||||
("nodes", &NODES_ROUTER),
|
||||
("pull", &pull::ROUTER),
|
||||
("reader", &reader::ROUTER),
|
||||
("status", &status::ROUTER),
|
||||
("subscription", &subscription::ROUTER),
|
||||
("version", &version::ROUTER),
|
||||
];
|
||||
|
@ -1,8 +1,8 @@
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use chrono::{TimeZone, Local};
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::http::request::Parts;
|
||||
use hyper::{header, Body, Response, StatusCode};
|
||||
@ -13,17 +13,21 @@ use proxmox::api::{
|
||||
RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
|
||||
|
||||
use pxar::accessor::aio::Accessor;
|
||||
use pxar::EntryKind;
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::api2::node::rrd::create_value_from_rrd;
|
||||
use crate::backup::*;
|
||||
use crate::config::datastore;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools;
|
||||
use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
|
||||
use crate::config::acl::{
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_MODIFY,
|
||||
@ -42,32 +46,49 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
|
||||
|
||||
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
|
||||
|
||||
let mut path = store.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(MANIFEST_BLOB_NAME);
|
||||
|
||||
let raw_data = file_get_contents(&path)?;
|
||||
let index_size = raw_data.len() as u64;
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
|
||||
let manifest = BackupManifest::try_from(blob)?;
|
||||
let (manifest, manifest_crypt_mode, index_size) = store.load_manifest(backup_dir)?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
for item in manifest.files() {
|
||||
result.push(BackupContent {
|
||||
filename: item.filename.clone(),
|
||||
crypt_mode: Some(item.crypt_mode),
|
||||
size: Some(item.size),
|
||||
});
|
||||
}
|
||||
|
||||
result.push(BackupContent {
|
||||
filename: MANIFEST_BLOB_NAME.to_string(),
|
||||
crypt_mode: Some(manifest_crypt_mode),
|
||||
size: Some(index_size),
|
||||
});
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn get_all_snapshot_files(
|
||||
store: &DataStore,
|
||||
info: &BackupInfo,
|
||||
) -> Result<Vec<BackupContent>, Error> {
|
||||
let mut files = read_backup_index(&store, &info.backup_dir)?;
|
||||
|
||||
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
|
||||
acc.insert(item.filename.clone());
|
||||
acc
|
||||
});
|
||||
|
||||
for file in &info.files {
|
||||
if file_set.contains(file) { continue; }
|
||||
files.push(BackupContent {
|
||||
filename: file.to_string(),
|
||||
size: None,
|
||||
crypt_mode: None,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
}
|
||||
|
||||
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
|
||||
|
||||
let mut group_hash = HashMap::new();
|
||||
@ -201,21 +222,9 @@ pub fn list_snapshot_files(
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
|
||||
|
||||
let mut files = read_backup_index(&datastore, &snapshot)?;
|
||||
|
||||
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
|
||||
|
||||
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
|
||||
acc.insert(item.filename.clone());
|
||||
acc
|
||||
});
|
||||
|
||||
for file in info.files {
|
||||
if file_set.contains(&file) { continue; }
|
||||
files.push(BackupContent { filename: file, size: None });
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
get_all_snapshot_files(&datastore, &info)
|
||||
}
|
||||
|
||||
#[api(
|
||||
@ -336,25 +345,36 @@ pub fn list_snapshots (
|
||||
if owner != username { continue; }
|
||||
}
|
||||
|
||||
let mut result_item = SnapshotListItem {
|
||||
let mut size = None;
|
||||
|
||||
let files = match get_all_snapshot_files(&datastore, &info) {
|
||||
Ok(files) => {
|
||||
size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
|
||||
files
|
||||
},
|
||||
Err(err) => {
|
||||
eprintln!("error during snapshot file listing: '{}'", err);
|
||||
info
|
||||
.files
|
||||
.iter()
|
||||
.map(|x| BackupContent {
|
||||
filename: x.to_string(),
|
||||
size: None,
|
||||
crypt_mode: None,
|
||||
})
|
||||
.collect()
|
||||
},
|
||||
};
|
||||
|
||||
let result_item = SnapshotListItem {
|
||||
backup_type: group.backup_type().to_string(),
|
||||
backup_id: group.backup_id().to_string(),
|
||||
backup_time: info.backup_dir.backup_time().timestamp(),
|
||||
files: info.files,
|
||||
size: None,
|
||||
files,
|
||||
size,
|
||||
owner: Some(owner),
|
||||
};
|
||||
|
||||
if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
|
||||
let mut backup_size = 0;
|
||||
for item in index.iter() {
|
||||
if let Some(item_size) = item.size {
|
||||
backup_size += item_size;
|
||||
}
|
||||
}
|
||||
result_item.size = Some(backup_size);
|
||||
}
|
||||
|
||||
snapshots.push(result_item);
|
||||
}
|
||||
|
||||
@ -382,25 +402,92 @@ pub fn status(
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<StorageStatus, Error> {
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
crate::tools::disks::disk_usage(&datastore.base_path())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-time": {
|
||||
schema: BACKUP_TIME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
|
||||
},
|
||||
)]
|
||||
/// Verify backups.
|
||||
///
|
||||
/// This function can verify a single backup snapshot, all backups from a backup group,
|
||||
/// or all backups in the datastore.
|
||||
pub fn verify(
|
||||
store: String,
|
||||
backup_type: Option<String>,
|
||||
backup_id: Option<String>,
|
||||
backup_time: Option<i64>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let base_path = datastore.base_path();
|
||||
let worker_id;
|
||||
|
||||
let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
|
||||
let mut backup_dir = None;
|
||||
let mut backup_group = None;
|
||||
|
||||
use nix::NixPath;
|
||||
match (backup_type, backup_id, backup_time) {
|
||||
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
|
||||
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
|
||||
let dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
backup_dir = Some(dir);
|
||||
}
|
||||
(Some(backup_type), Some(backup_id), None) => {
|
||||
worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
|
||||
let group = BackupGroup::new(backup_type, backup_id);
|
||||
backup_group = Some(group);
|
||||
}
|
||||
(None, None, None) => {
|
||||
worker_id = store.clone();
|
||||
}
|
||||
_ => bail!("parameters do not spefify a backup group or snapshot"),
|
||||
}
|
||||
|
||||
let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
|
||||
nix::errno::Errno::result(res)?;
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
|
||||
|
||||
let bsize = stat.f_bsize as u64;
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
|
||||
{
|
||||
let success = if let Some(backup_dir) = backup_dir {
|
||||
verify_backup_dir(&datastore, &backup_dir, &worker)?
|
||||
} else if let Some(backup_group) = backup_group {
|
||||
verify_backup_group(&datastore, &backup_group, &worker)?
|
||||
} else {
|
||||
verify_all_backups(&datastore, &worker)?
|
||||
};
|
||||
if !success {
|
||||
bail!("verfication failed - please check the log for details");
|
||||
}
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(StorageStatus {
|
||||
total: stat.f_blocks*bsize,
|
||||
used: (stat.f_blocks-stat.f_bfree)*bsize,
|
||||
avail: stat.f_bavail*bsize,
|
||||
})
|
||||
Ok(json!(upid_str))
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
@ -752,19 +839,22 @@ fn download_file(
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
|
||||
backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
|
||||
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let file = tokio::fs::File::open(path)
|
||||
let file = tokio::fs::File::open(&path)
|
||||
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
|
||||
.await?;
|
||||
|
||||
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", &path, err);
|
||||
err
|
||||
});
|
||||
let body = Body::wrap_stream(payload);
|
||||
|
||||
// fixme: set other headers ?
|
||||
@ -776,6 +866,118 @@ fn download_file(
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&download_file_decoded),
|
||||
&ObjectSchema::new(
|
||||
"Download single decoded file from backup snapshot. Only works if it's not encrypted.",
|
||||
&sorted!([
|
||||
("store", false, &DATASTORE_SCHEMA),
|
||||
("backup-type", false, &BACKUP_TYPE_SCHEMA),
|
||||
("backup-id", false, &BACKUP_ID_SCHEMA),
|
||||
("backup-time", false, &BACKUP_TIME_SCHEMA),
|
||||
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
|
||||
]),
|
||||
)
|
||||
).access(None, &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
|
||||
true)
|
||||
);
|
||||
|
||||
fn download_file_decoded(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let store = tools::required_string_param(¶m, "store")?;
|
||||
let datastore = DataStore::lookup_datastore(store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let file_name = tools::required_string_param(¶m, "file-name")?.to_owned();
|
||||
|
||||
let backup_type = tools::required_string_param(¶m, "backup-type")?;
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let files = read_backup_index(&datastore, &backup_dir)?;
|
||||
for file in files {
|
||||
if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
|
||||
bail!("cannot decode '{}' - is encrypted", file_name);
|
||||
}
|
||||
}
|
||||
|
||||
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let extension = file_name.rsplitn(2, '.').next().unwrap();
|
||||
|
||||
let body = match extension {
|
||||
"didx" => {
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = AsyncIndexReader::new(index, chunk_reader);
|
||||
Body::wrap_stream(AsyncReaderStream::new(reader)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
}))
|
||||
},
|
||||
"fidx" => {
|
||||
let index = FixedIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = AsyncIndexReader::new(index, chunk_reader);
|
||||
Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
}))
|
||||
},
|
||||
"blob" => {
|
||||
let file = std::fs::File::open(&path)
|
||||
.map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
|
||||
|
||||
Body::wrap_stream(
|
||||
WrappedReaderStream::new(DataBlobReader::new(file, None)?)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", path, err);
|
||||
err
|
||||
})
|
||||
)
|
||||
},
|
||||
extension => {
|
||||
bail!("cannot download '{}' files", extension);
|
||||
},
|
||||
};
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&upload_backup_log),
|
||||
@ -846,6 +1048,212 @@ fn upload_backup_log(
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
},
|
||||
"backup-time": {
|
||||
schema: BACKUP_TIME_SCHEMA,
|
||||
},
|
||||
"filepath": {
|
||||
description: "Base64 encoded path.",
|
||||
type: String,
|
||||
}
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
|
||||
},
|
||||
)]
|
||||
/// Get the entries of the given path of the catalog
|
||||
fn catalog(
|
||||
store: String,
|
||||
backup_type: String,
|
||||
backup_id: String,
|
||||
backup_time: i64,
|
||||
filepath: String,
|
||||
_param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
path.push(CATALOG_NAME);
|
||||
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
|
||||
let mut catalog_reader = CatalogReader::new(reader);
|
||||
let mut current = catalog_reader.root()?;
|
||||
let mut components = vec![];
|
||||
|
||||
|
||||
if filepath != "root" {
|
||||
components = base64::decode(filepath)?;
|
||||
if components.len() > 0 && components[0] == '/' as u8 {
|
||||
components.remove(0);
|
||||
}
|
||||
for component in components.split(|c| *c == '/' as u8) {
|
||||
if let Some(entry) = catalog_reader.lookup(¤t, component)? {
|
||||
current = entry;
|
||||
} else {
|
||||
bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = Vec::new();
|
||||
|
||||
for direntry in catalog_reader.read_dir(¤t)? {
|
||||
let mut components = components.clone();
|
||||
components.push('/' as u8);
|
||||
components.extend(&direntry.name);
|
||||
let path = base64::encode(components);
|
||||
let text = String::from_utf8_lossy(&direntry.name);
|
||||
let mut entry = json!({
|
||||
"filepath": path,
|
||||
"text": text,
|
||||
"type": CatalogEntryType::from(&direntry.attr).to_string(),
|
||||
"leaf": true,
|
||||
});
|
||||
match direntry.attr {
|
||||
DirEntryAttribute::Directory { start: _ } => {
|
||||
entry["leaf"] = false.into();
|
||||
},
|
||||
DirEntryAttribute::File { size, mtime } => {
|
||||
entry["size"] = size.into();
|
||||
entry["mtime"] = mtime.into();
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
res.push(entry);
|
||||
}
|
||||
|
||||
Ok(res.into())
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&pxar_file_download),
|
||||
&ObjectSchema::new(
|
||||
"Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
|
||||
&sorted!([
|
||||
("store", false, &DATASTORE_SCHEMA),
|
||||
("backup-type", false, &BACKUP_TYPE_SCHEMA),
|
||||
("backup-id", false, &BACKUP_ID_SCHEMA),
|
||||
("backup-time", false, &BACKUP_TIME_SCHEMA),
|
||||
("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
|
||||
]),
|
||||
)
|
||||
).access(None, &Permission::Privilege(
|
||||
&["datastore", "{store}"],
|
||||
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
|
||||
true)
|
||||
);
|
||||
|
||||
fn pxar_file_download(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let store = tools::required_string_param(¶m, "store")?;
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
|
||||
let filepath = tools::required_string_param(¶m, "filepath")?.to_owned();
|
||||
|
||||
let backup_type = tools::required_string_param(¶m, "backup-type")?;
|
||||
let backup_id = tools::required_string_param(¶m, "backup-id")?;
|
||||
let backup_time = tools::required_integer_param(¶m, "backup-time")?;
|
||||
|
||||
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
|
||||
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
|
||||
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
|
||||
|
||||
let mut path = datastore.base_path();
|
||||
path.push(backup_dir.relative_path());
|
||||
|
||||
let mut components = base64::decode(&filepath)?;
|
||||
if components.len() > 0 && components[0] == '/' as u8 {
|
||||
components.remove(0);
|
||||
}
|
||||
|
||||
let mut split = components.splitn(2, |c| *c == '/' as u8);
|
||||
let pxar_name = split.next().unwrap();
|
||||
let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
|
||||
|
||||
path.push(OsStr::from_bytes(&pxar_name));
|
||||
|
||||
let index = DynamicIndexReader::open(&path)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
|
||||
|
||||
let chunk_reader = LocalChunkReader::new(datastore, None);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader = LocalDynamicReadAt::new(reader);
|
||||
|
||||
let decoder = Accessor::new(reader, archive_size).await?;
|
||||
let root = decoder.open_root().await?;
|
||||
let file = root
|
||||
.lookup(OsStr::from_bytes(file_path)).await?
|
||||
.ok_or(format_err!("error opening '{:?}'", file_path))?;
|
||||
|
||||
let file = match file.kind() {
|
||||
EntryKind::File { .. } => file,
|
||||
EntryKind::Hardlink(_) => {
|
||||
decoder.follow_hardlink(&file).await?
|
||||
},
|
||||
// TODO symlink
|
||||
other => bail!("cannot download file of type {:?}", other),
|
||||
};
|
||||
|
||||
let body = Body::wrap_stream(
|
||||
AsyncReaderStream::new(file.contents().await?)
|
||||
.map_err(move |err| {
|
||||
eprintln!("error during streaming of '{:?}' - {}", filepath, err);
|
||||
err
|
||||
})
|
||||
);
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
@ -872,10 +1280,8 @@ fn get_rrd_stats(
|
||||
_param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
|
||||
crate::rrd::extract_data(
|
||||
&rrd_dir,
|
||||
create_value_from_rrd(
|
||||
&format!("datastore/{}", store),
|
||||
&[
|
||||
"total", "used",
|
||||
"read_ios", "read_bytes",
|
||||
@ -889,11 +1295,21 @@ fn get_rrd_stats(
|
||||
|
||||
#[sortable]
|
||||
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
(
|
||||
"catalog",
|
||||
&Router::new()
|
||||
.get(&API_METHOD_CATALOG)
|
||||
),
|
||||
(
|
||||
"download",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_FILE)
|
||||
),
|
||||
(
|
||||
"download-decoded",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_FILE_DECODED)
|
||||
),
|
||||
(
|
||||
"files",
|
||||
&Router::new()
|
||||
@ -915,6 +1331,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.post(&API_METHOD_PRUNE)
|
||||
),
|
||||
(
|
||||
"pxar-file-download",
|
||||
&Router::new()
|
||||
.download(&API_METHOD_PXAR_FILE_DOWNLOAD)
|
||||
),
|
||||
(
|
||||
"rrd",
|
||||
&Router::new()
|
||||
@ -936,6 +1357,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
|
||||
&Router::new()
|
||||
.upload(&API_METHOD_UPLOAD_BACKUP_LOG)
|
||||
),
|
||||
(
|
||||
"verify",
|
||||
&Router::new()
|
||||
.post(&API_METHOD_VERIFY)
|
||||
),
|
||||
];
|
||||
|
||||
const DATASTORE_INFO_ROUTER: Router = Router::new()
|
||||
|
@ -10,7 +10,7 @@ use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironm
|
||||
use proxmox::api::router::SubdirMap;
|
||||
use proxmox::api::schema::*;
|
||||
|
||||
use crate::tools::{self, WrappedReaderStream};
|
||||
use crate::tools;
|
||||
use crate::server::{WorkerTask, H2Service};
|
||||
use crate::backup::*;
|
||||
use crate::api2::types::*;
|
||||
@ -199,7 +199,6 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
),
|
||||
(
|
||||
"dynamic_index", &Router::new()
|
||||
.download(&API_METHOD_DYNAMIC_CHUNK_INDEX)
|
||||
.post(&API_METHOD_CREATE_DYNAMIC_INDEX)
|
||||
.put(&API_METHOD_DYNAMIC_APPEND)
|
||||
),
|
||||
@ -222,10 +221,13 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
||||
),
|
||||
(
|
||||
"fixed_index", &Router::new()
|
||||
.download(&API_METHOD_FIXED_CHUNK_INDEX)
|
||||
.post(&API_METHOD_CREATE_FIXED_INDEX)
|
||||
.put(&API_METHOD_FIXED_APPEND)
|
||||
),
|
||||
(
|
||||
"previous", &Router::new()
|
||||
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
|
||||
),
|
||||
(
|
||||
"speedtest", &Router::new()
|
||||
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
|
||||
@ -284,6 +286,8 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
|
||||
.minimum(1)
|
||||
.schema()
|
||||
),
|
||||
("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
|
||||
csum and reuse index for incremental backup if it matches.").schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
@ -296,10 +300,9 @@ fn create_fixed_index(
|
||||
|
||||
let env: &BackupEnvironment = rpcenv.as_ref();
|
||||
|
||||
println!("PARAM: {:?}", param);
|
||||
|
||||
let name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
let size = tools::required_integer_param(¶m, "size")? as usize;
|
||||
let reuse_csum = param["reuse-csum"].as_str();
|
||||
|
||||
let archive_name = name.clone();
|
||||
if !archive_name.ends_with(".fidx") {
|
||||
@ -307,12 +310,49 @@ fn create_fixed_index(
|
||||
}
|
||||
|
||||
let mut path = env.backup_dir.relative_path();
|
||||
path.push(archive_name);
|
||||
path.push(&archive_name);
|
||||
|
||||
let chunk_size = 4096*1024; // todo: ??
|
||||
|
||||
let index = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
|
||||
let wid = env.register_fixed_writer(index, name, size, chunk_size as u32)?;
|
||||
// do incremental backup if csum is set
|
||||
let mut reader = None;
|
||||
let mut incremental = false;
|
||||
if let Some(csum) = reuse_csum {
|
||||
incremental = true;
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
bail!("cannot reuse index - no previous backup exists");
|
||||
}
|
||||
};
|
||||
|
||||
let mut last_path = last_backup.backup_dir.relative_path();
|
||||
last_path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_fixed_reader(last_path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
bail!("cannot reuse index - no previous backup exists for archive");
|
||||
}
|
||||
};
|
||||
|
||||
let (old_csum, _) = index.compute_csum();
|
||||
let old_csum = proxmox::tools::digest_to_hex(&old_csum);
|
||||
if old_csum != csum {
|
||||
bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
|
||||
csum, old_csum);
|
||||
}
|
||||
|
||||
reader = Some(index);
|
||||
}
|
||||
|
||||
let mut writer = env.datastore.create_fixed_writer(&path, size, chunk_size)?;
|
||||
|
||||
if let Some(reader) = reader {
|
||||
writer.clone_data_from(&reader)?;
|
||||
}
|
||||
|
||||
let wid = env.register_fixed_writer(writer, name, size, chunk_size as u32, incremental)?;
|
||||
|
||||
env.log(format!("created new fixed index {} ({:?})", wid, path));
|
||||
|
||||
@ -520,15 +560,15 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
|
||||
(
|
||||
"chunk-count",
|
||||
false,
|
||||
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
|
||||
.minimum(1)
|
||||
&IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks. Ignored for incremental backups.")
|
||||
.minimum(0)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"size",
|
||||
false,
|
||||
&IntegerSchema::new("File size. This is used to verify that the server got all data.")
|
||||
.minimum(1)
|
||||
&IntegerSchema::new("File size. This is used to verify that the server got all data. Ignored for incremental backups.")
|
||||
.minimum(0)
|
||||
.schema()
|
||||
),
|
||||
("csum", false, &StringSchema::new("Digest list checksum.").schema()),
|
||||
@ -572,20 +612,17 @@ fn finish_backup (
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_DYNAMIC_CHUNK_INDEX: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&dynamic_chunk_index),
|
||||
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&download_previous),
|
||||
&ObjectSchema::new(
|
||||
r###"
|
||||
Download the dynamic chunk index from the previous backup.
|
||||
Simply returns an empty list if this is the first backup.
|
||||
"### ,
|
||||
"Download archive from previous backup.",
|
||||
&sorted!([
|
||||
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
fn dynamic_chunk_index(
|
||||
fn download_previous(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
@ -598,130 +635,38 @@ fn dynamic_chunk_index(
|
||||
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
|
||||
if !archive_name.ends_with(".didx") {
|
||||
bail!("wrong archive extension: '{}'", archive_name);
|
||||
}
|
||||
|
||||
let empty_response = {
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::empty())?
|
||||
};
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => return Ok(empty_response),
|
||||
None => bail!("no previous backup"),
|
||||
};
|
||||
|
||||
let mut path = last_backup.backup_dir.relative_path();
|
||||
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
|
||||
path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_dynamic_reader(path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
env.log(format!("there is no last backup for archive '{}'", archive_name));
|
||||
return Ok(empty_response);
|
||||
{
|
||||
let index: Option<Box<dyn IndexFile>> = match archive_type(&archive_name)? {
|
||||
ArchiveType::FixedIndex => {
|
||||
let index = env.datastore.open_fixed_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
ArchiveType::DynamicIndex => {
|
||||
let index = env.datastore.open_dynamic_reader(&path)?;
|
||||
Some(Box::new(index))
|
||||
}
|
||||
_ => { None }
|
||||
};
|
||||
if let Some(index) = index {
|
||||
env.log(format!("register chunks in '{}' from previous backup.", archive_name));
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
let info = index.chunk_info(pos).unwrap();
|
||||
let size = info.range.end - info.range.start;
|
||||
env.register_chunk(info.digest, size as u32)?;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
env.log(format!("download last backup index for archive '{}'", archive_name));
|
||||
|
||||
let count = index.index_count();
|
||||
for pos in 0..count {
|
||||
let (start, end, digest) = index.chunk_info(pos)?;
|
||||
let size = (end - start) as u32;
|
||||
env.register_chunk(digest, size)?;
|
||||
}
|
||||
|
||||
let reader = DigestListEncoder::new(Box::new(index));
|
||||
|
||||
let stream = WrappedReaderStream::new(reader);
|
||||
|
||||
// fixme: set size, content type?
|
||||
let response = http::Response::builder()
|
||||
.status(200)
|
||||
.body(Body::wrap_stream(stream))?;
|
||||
|
||||
Ok(response)
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
pub const API_METHOD_FIXED_CHUNK_INDEX: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::AsyncHttp(&fixed_chunk_index),
|
||||
&ObjectSchema::new(
|
||||
r###"
|
||||
Download the fixed chunk index from the previous backup.
|
||||
Simply returns an empty list if this is the first backup.
|
||||
"### ,
|
||||
&sorted!([
|
||||
("archive-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA)
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
fn fixed_chunk_index(
|
||||
_parts: Parts,
|
||||
_req_body: Body,
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: Box<dyn RpcEnvironment>,
|
||||
) -> ApiResponseFuture {
|
||||
|
||||
async move {
|
||||
let env: &BackupEnvironment = rpcenv.as_ref();
|
||||
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?.to_owned();
|
||||
|
||||
if !archive_name.ends_with(".fidx") {
|
||||
bail!("wrong archive extension: '{}'", archive_name);
|
||||
}
|
||||
|
||||
let empty_response = {
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::empty())?
|
||||
};
|
||||
|
||||
let last_backup = match &env.last_backup {
|
||||
Some(info) => info,
|
||||
None => return Ok(empty_response),
|
||||
};
|
||||
|
||||
let mut path = last_backup.backup_dir.relative_path();
|
||||
path.push(&archive_name);
|
||||
|
||||
let index = match env.datastore.open_fixed_reader(path) {
|
||||
Ok(index) => index,
|
||||
Err(_) => {
|
||||
env.log(format!("there is no last backup for archive '{}'", archive_name));
|
||||
return Ok(empty_response);
|
||||
}
|
||||
};
|
||||
|
||||
env.log(format!("download last backup index for archive '{}'", archive_name));
|
||||
|
||||
let count = index.index_count();
|
||||
let image_size = index.index_bytes();
|
||||
for pos in 0..count {
|
||||
let digest = index.index_digest(pos).unwrap();
|
||||
// Note: last chunk can be smaller
|
||||
let start = (pos*index.chunk_size) as u64;
|
||||
let mut end = start + index.chunk_size as u64;
|
||||
if end > image_size { end = image_size; }
|
||||
let size = (end - start) as u32;
|
||||
env.register_chunk(*digest, size)?;
|
||||
}
|
||||
|
||||
let reader = DigestListEncoder::new(Box::new(index));
|
||||
|
||||
let stream = WrappedReaderStream::new(reader);
|
||||
|
||||
// fixme: set size, content type?
|
||||
let response = http::Response::builder()
|
||||
.status(200)
|
||||
.body(Body::wrap_stream(stream))?;
|
||||
|
||||
Ok(response)
|
||||
env.log(format!("download '{}' from previous backup.", archive_name));
|
||||
crate::api2::helpers::create_download_response(path).await
|
||||
}.boxed()
|
||||
}
|
||||
|
@ -47,6 +47,7 @@ struct FixedWriterState {
|
||||
chunk_count: u64,
|
||||
small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
|
||||
upload_stat: UploadStatistic,
|
||||
incremental: bool,
|
||||
}
|
||||
|
||||
struct SharedBackupState {
|
||||
@ -237,7 +238,7 @@ impl BackupEnvironment {
|
||||
}
|
||||
|
||||
/// Store the writer with a unique ID
|
||||
pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32) -> Result<usize, Error> {
|
||||
pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
|
||||
state.ensure_unfinished()?;
|
||||
@ -245,7 +246,7 @@ impl BackupEnvironment {
|
||||
let uid = state.next_uid();
|
||||
|
||||
state.fixed_writers.insert(uid, FixedWriterState {
|
||||
index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
|
||||
index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
|
||||
});
|
||||
|
||||
Ok(uid)
|
||||
@ -310,7 +311,13 @@ impl BackupEnvironment {
|
||||
|
||||
self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
|
||||
|
||||
let client_side_duplicates = chunk_count - upload_stat.count;
|
||||
// account for zero chunk, which might be uploaded but never used
|
||||
let client_side_duplicates = if chunk_count < upload_stat.count {
|
||||
0
|
||||
} else {
|
||||
chunk_count - upload_stat.count
|
||||
};
|
||||
|
||||
let server_side_duplicates = upload_stat.duplicates;
|
||||
|
||||
if (client_side_duplicates + server_side_duplicates) > 0 {
|
||||
@ -373,21 +380,22 @@ impl BackupEnvironment {
|
||||
bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
|
||||
}
|
||||
|
||||
let expected_count = data.index.index_length();
|
||||
if !data.incremental {
|
||||
let expected_count = data.index.index_length();
|
||||
|
||||
if chunk_count != (expected_count as u64) {
|
||||
bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
|
||||
}
|
||||
if chunk_count != (expected_count as u64) {
|
||||
bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
|
||||
}
|
||||
|
||||
if size != (data.size as u64) {
|
||||
bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
|
||||
if size != (data.size as u64) {
|
||||
bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
|
||||
}
|
||||
}
|
||||
|
||||
let uuid = data.index.uuid;
|
||||
|
||||
let expected_csum = data.index.close()?;
|
||||
|
||||
println!("server checksum {:?} client: {:?}", expected_csum, csum);
|
||||
println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
|
||||
if csum != expected_csum {
|
||||
bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
|
||||
}
|
||||
@ -430,8 +438,6 @@ impl BackupEnvironment {
|
||||
|
||||
state.ensure_unfinished()?;
|
||||
|
||||
state.finished = true;
|
||||
|
||||
if state.dynamic_writers.len() != 0 {
|
||||
bail!("found open index writer - unable to finish backup");
|
||||
}
|
||||
@ -440,6 +446,8 @@ impl BackupEnvironment {
|
||||
bail!("backup does not contain valid files (file count == 0)");
|
||||
}
|
||||
|
||||
state.finished = true;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
23
src/api2/helpers.rs
Normal file
@ -0,0 +1,23 @@
|
||||
use std::path::PathBuf;
|
||||
use anyhow::Error;
|
||||
use futures::*;
|
||||
use hyper::{Body, Response, StatusCode, header};
|
||||
use proxmox::http_err;
|
||||
|
||||
pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
|
||||
let file = tokio::fs::File::open(path.clone())
|
||||
.map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
|
||||
.await?;
|
||||
|
||||
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
|
||||
|
||||
let body = Body::wrap_stream(payload);
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
}
|
@ -9,9 +9,11 @@ mod syslog;
|
||||
mod journal;
|
||||
mod services;
|
||||
mod status;
|
||||
mod rrd;
|
||||
pub(crate) mod rrd;
|
||||
pub mod disks;
|
||||
|
||||
pub const SUBDIRS: SubdirMap = &[
|
||||
("disks", &disks::ROUTER),
|
||||
("dns", &dns::ROUTER),
|
||||
("journal", &journal::ROUTER),
|
||||
("network", &network::ROUTER),
|
||||
|
188
src/api2/node/disks.rs
Normal file
@ -0,0 +1,188 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
|
||||
use proxmox::api::router::{Router, SubdirMap};
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::{list_subdirs_api_method};
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
DiskUsageInfo, DiskUsageType, DiskManage, SmartData,
|
||||
get_disks, get_smart_data, get_disk_usage_info, inititialize_gpt_disk,
|
||||
};
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
|
||||
|
||||
pub mod directory;
|
||||
pub mod zfs;
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
skipsmart: {
|
||||
description: "Skip smart checks.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"usage-type": {
|
||||
type: DiskUsageType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "Local disk list.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: DiskUsageInfo,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List local disks
|
||||
pub fn list_disks(
|
||||
skipsmart: bool,
|
||||
usage_type: Option<DiskUsageType>,
|
||||
) -> Result<Vec<DiskUsageInfo>, Error> {
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for (_, info) in get_disks(None, skipsmart)? {
|
||||
if let Some(ref usage_type) = usage_type {
|
||||
if info.used == *usage_type {
|
||||
list.push(info);
|
||||
}
|
||||
} else {
|
||||
list.push(info);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
healthonly: {
|
||||
description: "If true returns only the health status.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
type: SmartData,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get SMART attributes and health of a disk.
|
||||
pub fn smart_status(
|
||||
disk: String,
|
||||
healthonly: Option<bool>,
|
||||
) -> Result<SmartData, Error> {
|
||||
|
||||
let healthonly = healthonly.unwrap_or(false);
|
||||
|
||||
let manager = DiskManage::new();
|
||||
let disk = manager.disk_by_name(&disk)?;
|
||||
get_smart_data(&disk, healthonly)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
uuid: {
|
||||
description: "UUID for the GPT table.",
|
||||
type: String,
|
||||
optional: true,
|
||||
max_length: 36,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Initialize empty Disk with GPT
|
||||
pub fn initialize_disk(
|
||||
disk: String,
|
||||
uuid: Option<String>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let info = get_disk_usage_info(&disk, true)?;
|
||||
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("initialize disk {}", disk));
|
||||
|
||||
let disk_manager = DiskManage::new();
|
||||
let disk_info = disk_manager.disk_by_name(&disk)?;
|
||||
|
||||
inititialize_gpt_disk(&disk_info, uuid.as_deref())?;
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(json!(upid_str))
|
||||
}
|
||||
|
||||
#[sortable]
|
||||
const SUBDIRS: SubdirMap = &sorted!([
|
||||
// ("lvm", &lvm::ROUTER),
|
||||
("directory", &directory::ROUTER),
|
||||
("zfs", &zfs::ROUTER),
|
||||
(
|
||||
"initgpt", &Router::new()
|
||||
.post(&API_METHOD_INITIALIZE_DISK)
|
||||
),
|
||||
(
|
||||
"list", &Router::new()
|
||||
.get(&API_METHOD_LIST_DISKS)
|
||||
),
|
||||
(
|
||||
"smart", &Router::new()
|
||||
.get(&API_METHOD_SMART_STATUS)
|
||||
),
|
||||
]);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||
.subdirs(SUBDIRS);
|
221
src/api2/node/disks/directory.rs
Normal file
@ -0,0 +1,221 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::json;
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
|
||||
use proxmox::api::section_config::SectionConfigData;
|
||||
use proxmox::api::router::Router;
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
DiskManage, FileSystemType, DiskUsageType,
|
||||
create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
|
||||
};
|
||||
use crate::tools::systemd::{self, types::*};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::*;
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"filesystem": {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// Datastore mount info.
|
||||
pub struct DatastoreMountInfo {
|
||||
/// The path of the mount unit.
|
||||
pub unitfile: String,
|
||||
/// The mount path.
|
||||
pub path: String,
|
||||
/// The mounted device.
|
||||
pub device: String,
|
||||
/// File system type
|
||||
pub filesystem: Option<String>,
|
||||
/// Mount options
|
||||
pub options: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
description: "List of systemd datastore mount units.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: DatastoreMountInfo,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List systemd datastore mount units.
|
||||
pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
|
||||
}
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
let basedir = "/etc/systemd/system";
|
||||
for item in crate::tools::fs::scan_subdir(libc::AT_FDCWD, basedir, &MOUNT_NAME_REGEX)? {
|
||||
let item = item?;
|
||||
let name = item.file_name().to_string_lossy().to_string();
|
||||
|
||||
let unitfile = format!("{}/{}", basedir, name);
|
||||
let config = systemd::config::parse_systemd_mount(&unitfile)?;
|
||||
let data: SystemdMountSection = config.lookup("Mount", "Mount")?;
|
||||
|
||||
list.push(DatastoreMountInfo {
|
||||
unitfile,
|
||||
device: data.What,
|
||||
path: data.Where,
|
||||
filesystem: data.Type,
|
||||
options: data.Options,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
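
The loop above only relies on a handful of keys from each unit's [Mount] section (What, Where, Type, Options). As a rough illustration of the file shape being parsed, here is a tiny, std-only sketch; the real code goes through the crate's SectionConfig-based `parse_systemd_mount`, so `parse_mount_section` is a hypothetical helper, not part of this crate.

use std::collections::HashMap;

// Very small sketch of pulling keys out of a unit file's [Mount] section.
fn parse_mount_section(unit_text: &str) -> HashMap<String, String> {
    let mut in_mount = false;
    let mut map = HashMap::new();
    for line in unit_text.lines() {
        let line = line.trim();
        if line.starts_with('[') {
            in_mount = line == "[Mount]";
        } else if in_mount {
            if let Some((key, value)) = line.split_once('=') {
                map.insert(key.trim().to_string(), value.trim().to_string());
            }
        }
    }
    map
}

fn main() {
    // Example unit content mirroring what create_datastore_mount_unit (later
    // in this diff) writes out.
    let unit = "\
[Unit]
Description=Mount datastore 'store1' under '/mnt/datastore/store1'

[Mount]
What=/dev/disk/by-uuid/1234
Where=/mnt/datastore/store1
Type=ext4
Options=defaults
";
    let mount = parse_mount_section(unit);
    assert_eq!(mount.get("Where").map(String::as_str), Some("/mnt/datastore/store1"));
    assert_eq!(mount.get("Type").map(String::as_str), Some("ext4"));
}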
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the directory.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
filesystem: {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
|
||||
pub fn create_datastore_disk(
|
||||
name: String,
|
||||
disk: String,
|
||||
add_datastore: Option<bool>,
|
||||
filesystem: Option<FileSystemType>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let info = get_disk_usage_info(&disk, true)?;
|
||||
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("create datastore '{}' on disk {}", name, disk));
|
||||
|
||||
let add_datastore = add_datastore.unwrap_or(false);
|
||||
let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);
|
||||
|
||||
let manager = DiskManage::new();
|
||||
|
||||
let disk = manager.clone().disk_by_name(&disk)?;
|
||||
|
||||
let partition = create_single_linux_partition(&disk)?;
|
||||
create_file_system(&partition, filesystem)?;
|
||||
|
||||
let uuid = get_fs_uuid(&partition)?;
|
||||
let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
|
||||
|
||||
let (mount_unit_name, mount_point) = create_datastore_mount_unit(&name, filesystem, &uuid_path)?;
|
||||
|
||||
systemd::reload_daemon()?;
|
||||
systemd::enable_unit(&mount_unit_name)?;
|
||||
systemd::start_unit(&mount_unit_name)?;
|
||||
|
||||
if add_datastore {
|
||||
crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_DATASTORE_MOUNTS)
|
||||
.post(&API_METHOD_CREATE_DATASTORE_DISK);
|
||||
|
||||
|
||||
fn create_datastore_mount_unit(
|
||||
datastore_name: &str,
|
||||
fs_type: FileSystemType,
|
||||
what: &str,
|
||||
) -> Result<(String, String), Error> {
|
||||
|
||||
let mount_point = format!("/mnt/datastore/{}", datastore_name);
|
||||
let mut mount_unit_name = systemd::escape_unit(&mount_point, true);
|
||||
mount_unit_name.push_str(".mount");
|
||||
|
||||
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
|
||||
|
||||
let unit = SystemdUnitSection {
|
||||
Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let install = SystemdInstallSection {
|
||||
WantedBy: Some(vec!["multi-user.target".to_string()]),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mount = SystemdMountSection {
|
||||
What: what.to_string(),
|
||||
Where: mount_point.clone(),
|
||||
Type: Some(fs_type.to_string()),
|
||||
Options: Some(String::from("defaults")),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut config = SectionConfigData::new();
|
||||
config.set_data("Unit", "Unit", unit)?;
|
||||
config.set_data("Install", "Install", install)?;
|
||||
config.set_data("Mount", "Mount", mount)?;
|
||||
|
||||
systemd::config::save_systemd_mount(&mount_unit_path, &config)?;
|
||||
|
||||
Ok((mount_unit_name, mount_point))
|
||||
}
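
`systemd::escape_unit(&mount_point, true)` turns the mount path into a unit name such as `mnt-datastore-<name>.mount`, which is exactly what the MOUNT_NAME_REGEX earlier in this file matches. The following is a simplified, std-only approximation of systemd's path escaping, for illustration only; the real implementation (and `systemd-escape --path`) covers more edge cases such as empty paths and leading dots.

// Simplified sketch of systemd path escaping:
// "/mnt/datastore/store1" -> "mnt-datastore-store1.mount".
fn escape_unit_path(path: &str) -> String {
    let trimmed = path.trim_matches('/');
    let mut out = String::new();
    for (i, b) in trimmed.bytes().enumerate() {
        match b {
            b'/' => out.push('-'),
            b'_' | b':' => out.push(b as char),
            b'.' if i > 0 => out.push('.'),
            c if c.is_ascii_alphanumeric() => out.push(c as char),
            // everything else (including '-') gets hex-escaped
            c => out.push_str(&format!("\\x{:02x}", c)),
        }
    }
    out
}

fn main() {
    let mut unit = escape_unit_path("/mnt/datastore/store1");
    unit.push_str(".mount");
    assert_eq!(unit, "mnt-datastore-store1.mount");
}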
|
380 src/api2/node/disks/zfs.rs Normal file
@ -0,0 +1,380 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::{json, Value};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::{
|
||||
api, Permission, RpcEnvironment, RpcEnvironmentType,
|
||||
schema::{
|
||||
Schema,
|
||||
StringSchema,
|
||||
ArraySchema,
|
||||
IntegerSchema,
|
||||
ApiStringFormat,
|
||||
parse_property_string,
|
||||
},
|
||||
};
|
||||
use proxmox::api::router::Router;
|
||||
|
||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
|
||||
use crate::tools::disks::{
|
||||
zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
|
||||
DiskUsageType,
|
||||
};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use crate::api2::types::*;
|
||||
|
||||
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
|
||||
.schema();
|
||||
|
||||
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
"A list of disk names, comma separated.")
|
||||
.format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
|
||||
.schema();
|
||||
|
||||
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
|
||||
"Pool sector size exponent.")
|
||||
.minimum(9)
|
||||
.maximum(16)
|
||||
.default(12)
|
||||
.schema();
|
||||
|
||||
|
||||
#[api(
|
||||
default: "On",
|
||||
)]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS compression algorithm to use.
|
||||
pub enum ZfsCompressionType {
|
||||
/// Gnu Zip
|
||||
Gzip,
|
||||
/// LZ4
|
||||
Lz4,
|
||||
/// LZJB
|
||||
Lzjb,
|
||||
/// ZLE
|
||||
Zle,
|
||||
/// Enable compression using the default algorithm.
|
||||
On,
|
||||
/// Disable compression.
|
||||
Off,
|
||||
}
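
Because of `#[serde(rename_all = "lowercase")]`, these variants travel through the API as lowercase strings ("gzip", "lz4", "on", ...). A small sketch using the serde and serde_json crates already in the dependency tree, with a trimmed-down stand-in enum:

use serde::{Deserialize, Serialize};

// Stand-in enum only; checks what rename_all = "lowercase" produces as JSON.
#[allow(dead_code)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
enum Compression { Gzip, Lz4, On, Off }

fn main() -> Result<(), serde_json::Error> {
    assert_eq!(serde_json::to_string(&Compression::Lz4)?, "\"lz4\"");
    assert_eq!(serde_json::from_str::<Compression>("\"on\"")?, Compression::On);
    Ok(())
}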
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS RAID level to use.
|
||||
pub enum ZfsRaidLevel {
|
||||
/// Single Disk
|
||||
Single,
|
||||
/// Mirror
|
||||
Mirror,
|
||||
/// Raid10
|
||||
Raid10,
|
||||
/// RaidZ
|
||||
RaidZ,
|
||||
/// RaidZ2
|
||||
RaidZ2,
|
||||
/// RaidZ3
|
||||
RaidZ3,
|
||||
}
|
||||
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
/// zpool list item
|
||||
pub struct ZpoolListItem {
|
||||
/// zpool name
|
||||
pub name: String,
|
||||
/// Health
|
||||
pub health: String,
|
||||
/// Total size
|
||||
pub size: u64,
|
||||
/// Used size
|
||||
pub alloc: u64,
|
||||
/// Free space
|
||||
pub free: u64,
|
||||
/// ZFS fragmentation level
|
||||
pub frag: u64,
|
||||
/// ZFS deduplication ratio
|
||||
pub dedup: f64,
|
||||
}
|
||||
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "List of zpools.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: ZpoolListItem,
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// List zfs pools.
|
||||
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
|
||||
|
||||
let data = zpool_list(None, false)?;
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for item in data {
|
||||
if let Some(usage) = item.usage {
|
||||
list.push(ZpoolListItem {
|
||||
name: item.name,
|
||||
health: item.health,
|
||||
size: usage.size,
|
||||
alloc: usage.alloc,
|
||||
free: usage.free,
|
||||
frag: usage.frag,
|
||||
dedup: usage.dedup,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "zpool vdev tree with status",
|
||||
properties: {
|
||||
|
||||
},
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_AUDIT, false),
|
||||
},
|
||||
)]
|
||||
/// Get zpool status details.
|
||||
pub fn zpool_details(
|
||||
name: String,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let key_value_list = zpool_status(&name)?;
|
||||
|
||||
let config = match key_value_list.iter().find(|(k, _)| k == "config") {
|
||||
Some((_, v)) => v,
|
||||
None => bail!("got zpool status without config key"),
|
||||
};
|
||||
|
||||
let vdev_list = parse_zpool_status_config_tree(config)?;
|
||||
let mut tree = vdev_list_to_tree(&vdev_list)?;
|
||||
|
||||
for (k, v) in key_value_list {
|
||||
if k != "config" {
|
||||
tree[k] = v.into();
|
||||
}
|
||||
}
|
||||
|
||||
tree["name"] = tree.as_object_mut().unwrap()
|
||||
.remove("pool")
|
||||
.unwrap_or_else(|| name.into());
|
||||
|
||||
|
||||
Ok(tree)
|
||||
}
|
||||
|
||||
#[api(
|
||||
protected: true,
|
||||
input: {
|
||||
properties: {
|
||||
node: {
|
||||
schema: NODE_SCHEMA,
|
||||
},
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
devices: {
|
||||
schema: DISK_LIST_SCHEMA,
|
||||
},
|
||||
raidlevel: {
|
||||
type: ZfsRaidLevel,
|
||||
},
|
||||
ashift: {
|
||||
schema: ZFS_ASHIFT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
compression: {
|
||||
type: ZfsCompressionType,
|
||||
optional: true,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the zpool.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
access: {
|
||||
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||
},
|
||||
)]
|
||||
/// Create a new ZFS pool.
|
||||
pub fn create_zpool(
|
||||
name: String,
|
||||
devices: String,
|
||||
raidlevel: ZfsRaidLevel,
|
||||
compression: Option<String>,
|
||||
ashift: Option<usize>,
|
||||
add_datastore: Option<bool>,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<String, Error> {
|
||||
|
||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
|
||||
let add_datastore = add_datastore.unwrap_or(false);
|
||||
|
||||
let ashift = ashift.unwrap_or(12);
|
||||
|
||||
let devices_text = devices.clone();
|
||||
let devices = parse_property_string(&devices, &DISK_ARRAY_SCHEMA)?;
|
||||
let devices: Vec<String> = devices.as_array().unwrap().iter()
|
||||
.map(|v| v.as_str().unwrap().to_string()).collect();
|
||||
|
||||
let disk_map = crate::tools::disks::get_disks(None, true)?;
|
||||
for disk in devices.iter() {
|
||||
match disk_map.get(disk) {
|
||||
Some(info) => {
|
||||
if info.used != DiskUsageType::Unused {
|
||||
bail!("disk '{}' is already in use.", disk);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
bail!("no such disk '{}'", disk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let min_disks = match raidlevel {
|
||||
ZfsRaidLevel::Single => 1,
|
||||
ZfsRaidLevel::Mirror => 2,
|
||||
ZfsRaidLevel::Raid10 => 4,
|
||||
ZfsRaidLevel::RaidZ => 3,
|
||||
ZfsRaidLevel::RaidZ2 => 4,
|
||||
ZfsRaidLevel::RaidZ3 => 5,
|
||||
};
|
||||
|
||||
// Sanity checks
|
||||
if raidlevel == ZfsRaidLevel::Raid10 && devices.len() % 2 != 0 {
|
||||
bail!("Raid10 needs an even number of disks.");
|
||||
}
|
||||
|
||||
if raidlevel == ZfsRaidLevel::Single && devices.len() > 1 {
|
||||
bail!("Please give only one disk for single disk mode.");
|
||||
}
|
||||
|
||||
if devices.len() < min_disks {
|
||||
bail!("{:?} needs at least {} disks.", raidlevel, min_disks);
|
||||
}
|
||||
|
||||
// check if the default path already exists and bail if it does,
|
||||
// otherwise we get an error on mounting
|
||||
let mut default_path = std::path::PathBuf::from("/");
|
||||
default_path.push(&name);
|
||||
|
||||
match std::fs::metadata(&default_path) {
|
||||
Err(_) => {}, // path does not exist
|
||||
Ok(_) => {
|
||||
bail!("path {:?} already exists", default_path);
|
||||
}
|
||||
}
|
||||
|
||||
let upid_str = WorkerTask::new_thread(
|
||||
"zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
|
||||
{
|
||||
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
|
||||
|
||||
|
||||
let mut command = std::process::Command::new("zpool");
|
||||
command.args(&["create", "-o", &format!("ashift={}", ashift), &name]);
|
||||
|
||||
match raidlevel {
|
||||
ZfsRaidLevel::Single => {
|
||||
command.arg(&devices[0]);
|
||||
}
|
||||
ZfsRaidLevel::Mirror => {
|
||||
command.arg("mirror");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::Raid10 => {
|
||||
devices.chunks(2).for_each(|pair| {
|
||||
command.arg("mirror");
|
||||
command.args(pair);
|
||||
});
|
||||
}
|
||||
ZfsRaidLevel::RaidZ => {
|
||||
command.arg("raidz");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::RaidZ2 => {
|
||||
command.arg("raidz2");
|
||||
command.args(devices);
|
||||
}
|
||||
ZfsRaidLevel::RaidZ3 => {
|
||||
command.arg("raidz3");
|
||||
command.args(devices);
|
||||
}
|
||||
}
|
||||
|
||||
worker.log(format!("# {:?}", command));
|
||||
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
worker.log(output);
|
||||
|
||||
if let Some(compression) = compression {
|
||||
let mut command = std::process::Command::new("zfs");
|
||||
command.args(&["set", &format!("compression={}", compression), &name]);
|
||||
worker.log(format!("# {:?}", command));
|
||||
let output = crate::tools::run_command(command, None)?;
|
||||
worker.log(output);
|
||||
}
|
||||
|
||||
if add_datastore {
|
||||
let mount_point = format!("/{}", name);
|
||||
crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
Ok(upid_str)
|
||||
}
|
||||
|
||||
pub const POOL_ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_ZPOOL_DETAILS);
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&API_METHOD_LIST_ZPOOLS)
|
||||
.post(&API_METHOD_CREATE_ZPOOL)
|
||||
.match_all("name", &POOL_ROUTER);
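
The vdev layout passed to `zpool create` depends on the RAID level: Raid10 groups the disks into mirrored pairs, while the raidz levels put all disks into a single vdev. The following stand-alone sketch assembles the same argument list for comparison (illustrative names only; the real code above feeds `std::process::Command` directly):

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum RaidLevel { Single, Mirror, Raid10, RaidZ, RaidZ2, RaidZ3 }

// Build the `zpool create` argument list the same way the worker above does.
fn zpool_create_args(name: &str, ashift: usize, level: RaidLevel, devices: &[String]) -> Vec<String> {
    let mut args = vec![
        "create".to_string(),
        "-o".to_string(),
        format!("ashift={}", ashift),
        name.to_string(),
    ];
    match level {
        RaidLevel::Single => args.push(devices[0].clone()),
        RaidLevel::Mirror => {
            args.push("mirror".to_string());
            args.extend_from_slice(devices);
        }
        RaidLevel::Raid10 => {
            // one "mirror" vdev per pair of disks
            for pair in devices.chunks(2) {
                args.push("mirror".to_string());
                args.extend_from_slice(pair);
            }
        }
        RaidLevel::RaidZ | RaidLevel::RaidZ2 | RaidLevel::RaidZ3 => {
            let vdev = match level {
                RaidLevel::RaidZ => "raidz",
                RaidLevel::RaidZ2 => "raidz2",
                _ => "raidz3",
            };
            args.push(vdev.to_string());
            args.extend_from_slice(devices);
        }
    }
    args
}

fn main() {
    let disks: Vec<String> = ["sda", "sdb", "sdc", "sdd"].iter().map(|s| s.to_string()).collect();
    let args = zpool_create_args("tank", 12, RaidLevel::Raid10, &disks);
    // zpool create -o ashift=12 tank mirror sda sdb mirror sdc sdd
    assert_eq!(args.join(" "), "create -o ashift=12 tank mirror sda sdb mirror sdc sdd");
}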
|
@ -94,7 +94,7 @@ fn get_journal(
|
||||
|
||||
let mut lines: Vec<String> = vec![];
|
||||
|
||||
let mut child = Command::new("/usr/bin/mini-journalreader")
|
||||
let mut child = Command::new("mini-journalreader")
|
||||
.args(&args)
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
@ -1,9 +1,47 @@
|
||||
use anyhow::Error;
|
||||
use serde_json::Value;
|
||||
use serde_json::{Value, json};
|
||||
|
||||
use proxmox::api::{api, Router};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::rrd::{extract_cached_data, RRD_DATA_ENTRIES};
|
||||
|
||||
pub fn create_value_from_rrd(
|
||||
basedir: &str,
|
||||
list: &[&str],
|
||||
timeframe: RRDTimeFrameResolution,
|
||||
cf: RRDMode,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let mut result = Vec::new();
|
||||
let now = epoch_now_f64()?;
|
||||
|
||||
for name in list {
|
||||
let (start, reso, list) = match extract_cached_data(basedir, name, now, timeframe, cf) {
|
||||
Some(result) => result,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let mut t = start;
|
||||
for index in 0..RRD_DATA_ENTRIES {
|
||||
if result.len() <= index {
|
||||
if let Some(value) = list[index] {
|
||||
result.push(json!({ "time": t, *name: value }));
|
||||
} else {
|
||||
result.push(json!({ "time": t }));
|
||||
}
|
||||
} else {
|
||||
if let Some(value) = list[index] {
|
||||
result[index][name] = value.into();
|
||||
}
|
||||
}
|
||||
t += reso;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result.into())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
@ -27,7 +65,7 @@ fn get_node_stats(
|
||||
_param: Value,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
crate::rrd::extract_data(
|
||||
create_value_from_rrd(
|
||||
"host",
|
||||
&[
|
||||
"cpu", "iowait",
|
||||
|
@ -38,7 +38,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
|
||||
|
||||
let real_service_name = real_service_name(service);
|
||||
|
||||
let mut child = Command::new("/bin/systemctl")
|
||||
let mut child = Command::new("systemctl")
|
||||
.args(&["show", real_service_name])
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
|
||||
|
||||
let real_service_name = real_service_name(service);
|
||||
|
||||
let status = Command::new("/bin/systemctl")
|
||||
let status = Command::new("systemctl")
|
||||
.args(&[cmd, real_service_name])
|
||||
.status()?;
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
use std::process::Command;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{Error, format_err, bail};
|
||||
use serde_json::{json, Value};
|
||||
@ -60,6 +61,7 @@ fn get_usage(
|
||||
|
||||
let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
|
||||
let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
|
||||
let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;
|
||||
|
||||
Ok(json!({
|
||||
"memory": {
|
||||
@ -68,6 +70,11 @@ fn get_usage(
|
||||
"free": meminfo.memfree,
|
||||
},
|
||||
"cpu": kstat.cpu,
|
||||
"root": {
|
||||
"total": disk_usage.total,
|
||||
"used": disk_usage.used,
|
||||
"free": disk_usage.avail,
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
@ -95,7 +102,7 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
|
||||
NodePowerCommand::Shutdown => "poweroff",
|
||||
};
|
||||
|
||||
let output = Command::new("/bin/systemctl")
|
||||
let output = Command::new("systemctl")
|
||||
.arg(systemctl_command)
|
||||
.output()
|
||||
.map_err(|err| format_err!("failed to execute systemctl - {}", err))?;
|
||||
|
@ -27,7 +27,7 @@ fn dump_journal(
|
||||
let start = start.unwrap_or(0);
|
||||
let mut count: u64 = 0;
|
||||
|
||||
let mut child = Command::new("/bin/journalctl")
|
||||
let mut child = Command::new("journalctl")
|
||||
.args(&args)
|
||||
.stdout(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
@ -323,21 +323,9 @@ pub fn list_tasks(
|
||||
|
||||
let mut count = 0;
|
||||
|
||||
for info in list.iter() {
|
||||
for info in list {
|
||||
if !list_all && info.upid.username != username { continue; }
|
||||
|
||||
let mut entry = TaskListItem {
|
||||
upid: info.upid_str.clone(),
|
||||
node: "localhost".to_string(),
|
||||
pid: info.upid.pid as i64,
|
||||
pstart: info.upid.pstart,
|
||||
starttime: info.upid.starttime,
|
||||
worker_type: info.upid.worker_type.clone(),
|
||||
worker_id: info.upid.worker_id.clone(),
|
||||
user: info.upid.username.clone(),
|
||||
endtime: None,
|
||||
status: None,
|
||||
};
|
||||
|
||||
if let Some(username) = userfilter {
|
||||
if !info.upid.username.contains(username) { continue; }
|
||||
@ -367,9 +355,6 @@ pub fn list_tasks(
|
||||
if errors && state.1 == "OK" {
|
||||
continue;
|
||||
}
|
||||
|
||||
entry.endtime = Some(state.0);
|
||||
entry.status = Some(state.1.clone());
|
||||
}
|
||||
|
||||
if (count as u64) < start {
|
||||
@ -379,7 +364,7 @@ pub fn list_tasks(
|
||||
count += 1;
|
||||
}
|
||||
|
||||
if (result.len() as u64) < limit { result.push(entry); };
|
||||
if (result.len() as u64) < limit { result.push(info.into()); };
|
||||
}
|
||||
|
||||
rpcenv["total"] = Value::from(count);
|
||||
|
@ -17,6 +17,7 @@ use crate::server::{WorkerTask, H2Service};
|
||||
use crate::tools;
|
||||
use crate::config::acl::PRIV_DATASTORE_READ;
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::api2::helpers;
|
||||
|
||||
mod environment;
|
||||
use environment::*;
|
||||
@ -187,26 +188,9 @@ fn download_file(
|
||||
path.push(env.backup_dir.relative_path());
|
||||
path.push(&file_name);
|
||||
|
||||
let path2 = path.clone();
|
||||
let path3 = path.clone();
|
||||
env.log(format!("download {:?}", path.clone()));
|
||||
|
||||
let file = tokio::fs::File::open(path)
|
||||
.map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
|
||||
.await?;
|
||||
|
||||
env.log(format!("download {:?}", path3));
|
||||
|
||||
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
|
||||
.map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
|
||||
|
||||
let body = Body::wrap_stream(payload);
|
||||
|
||||
// fixme: set other headers ?
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/octet-stream")
|
||||
.body(body)
|
||||
.unwrap())
|
||||
helpers::create_download_response(path).await
|
||||
}.boxed()
|
||||
}
|
||||
|
||||
|
226 src/api2/status.rs Normal file
@ -0,0 +1,226 @@
|
||||
use proxmox::list_subdirs_api_method;
|
||||
|
||||
use anyhow::{Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{
|
||||
api,
|
||||
ApiMethod,
|
||||
Permission,
|
||||
Router,
|
||||
RpcEnvironment,
|
||||
SubdirMap,
|
||||
UserInformation,
|
||||
};
|
||||
|
||||
use crate::api2::types::{
|
||||
DATASTORE_SCHEMA,
|
||||
RRDMode,
|
||||
RRDTimeFrameResolution,
|
||||
TaskListItem
|
||||
};
|
||||
|
||||
use crate::server;
|
||||
use crate::backup::{DataStore};
|
||||
use crate::config::datastore;
|
||||
use crate::tools::epoch_now_f64;
|
||||
use crate::tools::statistics::{linear_regression};
|
||||
use crate::config::cached_user_info::CachedUserInfo;
|
||||
use crate::config::acl::{
|
||||
PRIV_SYS_AUDIT,
|
||||
PRIV_DATASTORE_AUDIT,
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
};
|
||||
|
||||
#[api(
|
||||
returns: {
|
||||
description: "Lists the Status of the Datastores.",
|
||||
type: Array,
|
||||
items: {
|
||||
description: "Status of a Datastore",
|
||||
type: Object,
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
total: {
|
||||
type: Integer,
|
||||
description: "The Size of the underlying storage in bytes",
|
||||
},
|
||||
used: {
|
||||
type: Integer,
|
||||
description: "The used bytes of the underlying storage",
|
||||
},
|
||||
avail: {
|
||||
type: Integer,
|
||||
description: "The available bytes of the underlying storage",
|
||||
},
|
||||
history: {
|
||||
type: Array,
|
||||
description: "A list of usages of the past (last Month).",
|
||||
items: {
|
||||
type: Number,
|
||||
description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
|
||||
}
|
||||
},
|
||||
"estimated-full-date": {
|
||||
type: Integer,
|
||||
optional: true,
|
||||
description: "Estimation of the UNIX epoch when the storage will be full.\
|
||||
This is calculated via a simple Linear Regression (Least Squares)\
|
||||
of RRD data of the last Month. Missing if there are not enough data points yet.\
|
||||
If the estimate lies in the past, the usage is decreasing.",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// List Datastore usages and estimates
|
||||
fn datastore_status(
|
||||
_param: Value,
|
||||
_info: &ApiMethod,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let (config, _digest) = datastore::config()?;
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let mut list = Vec::new();
|
||||
|
||||
for (store, (_, _)) in &config.sections {
|
||||
let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
|
||||
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
|
||||
if !allowed {
|
||||
continue;
|
||||
}
|
||||
|
||||
let datastore = DataStore::lookup_datastore(&store)?;
|
||||
let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
|
||||
|
||||
let mut entry = json!({
|
||||
"store": store,
|
||||
"total": status.total,
|
||||
"used": status.used,
|
||||
"avail": status.avail,
|
||||
});
|
||||
|
||||
let rrd_dir = format!("datastore/{}", store);
|
||||
let now = epoch_now_f64()?;
|
||||
let rrd_resolution = RRDTimeFrameResolution::Month;
|
||||
let rrd_mode = RRDMode::Average;
|
||||
|
||||
let total_res = crate::rrd::extract_cached_data(
|
||||
&rrd_dir,
|
||||
"total",
|
||||
now,
|
||||
rrd_resolution,
|
||||
rrd_mode,
|
||||
);
|
||||
|
||||
let used_res = crate::rrd::extract_cached_data(
|
||||
&rrd_dir,
|
||||
"used",
|
||||
now,
|
||||
rrd_resolution,
|
||||
rrd_mode,
|
||||
);
|
||||
|
||||
match (total_res, used_res) {
|
||||
(Some((start, reso, total_list)), Some((_, _, used_list))) => {
|
||||
let mut usage_list: Vec<f64> = Vec::new();
|
||||
let mut time_list: Vec<u64> = Vec::new();
|
||||
let mut history = Vec::new();
|
||||
|
||||
for (idx, used) in used_list.iter().enumerate() {
|
||||
let total = if idx < total_list.len() {
|
||||
total_list[idx]
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match (total, used) {
|
||||
(Some(total), Some(used)) if total != 0.0 => {
|
||||
time_list.push(start + (idx as u64)*reso);
|
||||
let usage = used/total;
|
||||
usage_list.push(usage);
|
||||
history.push(json!(usage));
|
||||
},
|
||||
_ => {
|
||||
history.push(json!(null))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
entry["history"] = history.into();
|
||||
|
||||
// we skip the calculation for datastores that do not have enough data points yet
|
||||
if usage_list.len() >= 7 {
|
||||
if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
|
||||
if b != 0.0 {
|
||||
let estimate = (1.0 - a) / b;
|
||||
entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
|
||||
list.push(entry);
|
||||
}
|
||||
|
||||
Ok(list.into())
|
||||
}
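
The "estimated-full-date" above assumes `linear_regression` returns the least-squares fit usage ≈ a + b·t as (intercept, slope), which is what the `(1.0 - a) / b` expression implies: the fit reaches 100% usage at t = (1.0 - a) / b. A self-contained sketch of that calculation follows; the real `crate::tools::statistics::linear_regression` may differ in details, so treat this as an illustration only.

// Least-squares fit usage ≈ a + b·t over (time, usage) samples.
// Returns (intercept, slope), mirroring the (a, b) usage above.
fn linear_regression(x: &[u64], y: &[f64]) -> Option<(f64, f64)> {
    let n = x.len();
    if n < 2 || n != y.len() {
        return None;
    }
    let n_f = n as f64;
    let mean_x = x.iter().map(|&v| v as f64).sum::<f64>() / n_f;
    let mean_y = y.iter().sum::<f64>() / n_f;
    let mut num = 0.0;
    let mut den = 0.0;
    for (&xi, &yi) in x.iter().zip(y.iter()) {
        let dx = xi as f64 - mean_x;
        num += dx * (yi - mean_y);
        den += dx * dx;
    }
    if den == 0.0 {
        return None;
    }
    let slope = num / den;
    let intercept = mean_y - slope * mean_x;
    Some((intercept, slope))
}

fn main() {
    // usage grows by 1% per day, starting at 50% at t = 0
    let times: Vec<u64> = (0..10u64).map(|d| d * 86400).collect();
    let usage: Vec<f64> = (0..10).map(|d| 0.50 + 0.01 * d as f64).collect();
    let (a, b) = linear_regression(&times, &usage).unwrap();
    let estimated_full = (1.0 - a) / b; // seconds until 100%, on this time axis
    assert!((estimated_full - 50.0 * 86400.0).abs() < 1.0);
}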
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
since: {
|
||||
type: u64,
|
||||
description: "Only list tasks since this UNIX epoch.",
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns: {
|
||||
description: "A list of tasks.",
|
||||
type: Array,
|
||||
items: { type: TaskListItem },
|
||||
},
|
||||
access: {
|
||||
description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
|
||||
permission: &Permission::Anybody,
|
||||
},
|
||||
)]
|
||||
/// List tasks.
|
||||
pub fn list_tasks(
|
||||
_param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Vec<TaskListItem>, Error> {
|
||||
|
||||
let username = rpcenv.get_user().unwrap();
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
|
||||
|
||||
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
|
||||
|
||||
// TODO: replace with a call that gets all tasks since the 'since' epoch
|
||||
let list: Vec<TaskListItem> = server::read_task_list()?
|
||||
.into_iter()
|
||||
.map(TaskListItem::from)
|
||||
.filter(|entry| list_all || entry.user == username)
|
||||
.collect();
|
||||
|
||||
Ok(list.into())
|
||||
}
|
||||
|
||||
const SUBDIRS: SubdirMap = &[
|
||||
("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
|
||||
("tasks", &Router::new().get(&API_METHOD_LIST_TASKS)),
|
||||
];
|
||||
|
||||
pub const ROUTER: Router = Router::new()
|
||||
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||
.subdirs(SUBDIRS);
|
@ -5,6 +5,8 @@ use proxmox::api::{api, schema::*};
|
||||
use proxmox::const_regex;
|
||||
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
|
||||
|
||||
use crate::backup::CryptMode;
|
||||
|
||||
// File names: may not contain slashes, may not start with "."
|
||||
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
|
||||
if name.starts_with('.') {
|
||||
@ -74,6 +76,8 @@ const_regex!{
|
||||
pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
|
||||
|
||||
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||
|
||||
pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
|
||||
}
|
||||
|
||||
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
|
||||
@ -133,6 +137,8 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
|
||||
pub const CIDR_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&CIDR_REGEX);
|
||||
|
||||
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
|
||||
|
||||
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
@ -353,6 +359,11 @@ pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
|
||||
.format(&BLOCKDEVICE_NAME_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
// Complex type definitions
|
||||
|
||||
@ -419,7 +430,7 @@ pub struct SnapshotListItem {
|
||||
pub backup_id: String,
|
||||
pub backup_time: i64,
|
||||
/// List of contained archive files.
|
||||
pub files: Vec<String>,
|
||||
pub files: Vec<BackupContent>,
|
||||
/// Overall snapshot size (sum of all archive sizes).
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub size: Option<u64>,
|
||||
@ -487,6 +498,10 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
|
||||
"filename": {
|
||||
schema: BACKUP_ARCHIVE_NAME_SCHEMA,
|
||||
},
|
||||
"crypt-mode": {
|
||||
type: CryptMode,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@ -494,6 +509,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
|
||||
/// Basic information about archive files inside a backup snapshot.
|
||||
pub struct BackupContent {
|
||||
pub filename: String,
|
||||
/// Info if file is encrypted, signed, or neither.
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub crypt_mode: Option<CryptMode>,
|
||||
/// Archive size (from backup manifest).
|
||||
#[serde(skip_serializing_if="Option::is_none")]
|
||||
pub size: Option<u64>,
|
||||
@ -590,6 +608,27 @@ pub struct TaskListItem {
|
||||
pub status: Option<String>,
|
||||
}
|
||||
|
||||
impl From<crate::server::TaskListInfo> for TaskListItem {
|
||||
fn from(info: crate::server::TaskListInfo) -> Self {
|
||||
let (endtime, status) = info
|
||||
.state
|
||||
.map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
|
||||
|
||||
TaskListItem {
|
||||
upid: info.upid_str,
|
||||
node: "localhost".to_string(),
|
||||
pid: info.upid.pid as i64,
|
||||
pstart: info.upid.pstart,
|
||||
starttime: info.upid.starttime,
|
||||
worker_type: info.upid.worker_type,
|
||||
worker_id: info.upid.worker_id,
|
||||
user: info.upid.username,
|
||||
endtime,
|
||||
status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
|
@ -10,6 +10,8 @@ use std::path::PathBuf;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
use proxmox::try_block;
|
||||
|
||||
use crate::tools::epoch_now_u64;
|
||||
|
||||
fn compute_csrf_secret_digest(
|
||||
timestamp: i64,
|
||||
secret: &[u8],
|
||||
@ -29,8 +31,7 @@ pub fn assemble_csrf_prevention_token(
|
||||
username: &str,
|
||||
) -> String {
|
||||
|
||||
let epoch = std::time::SystemTime::now().duration_since(
|
||||
std::time::SystemTime::UNIX_EPOCH).unwrap().as_secs() as i64;
|
||||
let epoch = epoch_now_u64().unwrap() as i64;
|
||||
|
||||
let digest = compute_csrf_secret_digest(epoch, secret, username);
|
||||
|
||||
@ -67,8 +68,7 @@ pub fn verify_csrf_prevention_token(
|
||||
bail!("invalid signature.");
|
||||
}
|
||||
|
||||
let now = std::time::SystemTime::now().duration_since(
|
||||
std::time::SystemTime::UNIX_EPOCH)?.as_secs() as i64;
|
||||
let now = epoch_now_u64()? as i64;
|
||||
|
||||
let age = now - ttime;
|
||||
if age < min_age {
|
||||
|
@ -198,5 +198,11 @@ pub use prune::*;
|
||||
mod datastore;
|
||||
pub use datastore::*;
|
||||
|
||||
mod verify;
|
||||
pub use verify::*;
|
||||
|
||||
mod catalog_shell;
|
||||
pub use catalog_shell::*;
|
||||
|
||||
mod async_index_reader;
|
||||
pub use async_index_reader::*;
|
||||
|
128 src/backup/async_index_reader.rs Normal file
@ -0,0 +1,128 @@
|
||||
use std::future::Future;
|
||||
use std::task::{Poll, Context};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::Error;
|
||||
use futures::future::FutureExt;
|
||||
use futures::ready;
|
||||
use tokio::io::AsyncRead;
|
||||
|
||||
use proxmox::sys::error::io_err_other;
|
||||
use proxmox::io_format_err;
|
||||
|
||||
use super::IndexFile;
|
||||
use super::read_chunk::AsyncReadChunk;
|
||||
|
||||
enum AsyncIndexReaderState<S> {
|
||||
NoData,
|
||||
WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
|
||||
HaveData(usize),
|
||||
}
|
||||
|
||||
pub struct AsyncIndexReader<S, I: IndexFile> {
|
||||
store: Option<S>,
|
||||
index: I,
|
||||
read_buffer: Vec<u8>,
|
||||
current_chunk_idx: usize,
|
||||
current_chunk_digest: [u8; 32],
|
||||
state: AsyncIndexReaderState<S>,
|
||||
}
|
||||
|
||||
// ok because the only public interface operates on &mut Self
|
||||
unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}
|
||||
|
||||
impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
|
||||
pub fn new(index: I, store: S) -> Self {
|
||||
Self {
|
||||
store: Some(store),
|
||||
index,
|
||||
read_buffer: Vec::with_capacity(1024 * 1024),
|
||||
current_chunk_idx: 0,
|
||||
current_chunk_digest: [0u8; 32],
|
||||
state: AsyncIndexReaderState::NoData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, I> AsyncRead for AsyncIndexReader<S, I>
|
||||
where
|
||||
S: AsyncReadChunk + Unpin + Sync + 'static,
|
||||
I: IndexFile + Unpin,
|
||||
{
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<tokio::io::Result<usize>> {
|
||||
let this = Pin::get_mut(self);
|
||||
loop {
|
||||
match &mut this.state {
|
||||
AsyncIndexReaderState::NoData => {
|
||||
if this.current_chunk_idx >= this.index.index_count() {
|
||||
return Poll::Ready(Ok(0));
|
||||
}
|
||||
|
||||
let digest = this
|
||||
.index
|
||||
.index_digest(this.current_chunk_idx)
|
||||
.ok_or(io_format_err!("could not get digest"))?
|
||||
.clone();
|
||||
|
||||
if digest == this.current_chunk_digest {
|
||||
this.state = AsyncIndexReaderState::HaveData(0);
|
||||
continue;
|
||||
}
|
||||
|
||||
this.current_chunk_digest = digest;
|
||||
|
||||
let store = match this.store.take() {
|
||||
Some(store) => store,
|
||||
None => {
|
||||
return Poll::Ready(Err(io_format_err!("could not find store")));
|
||||
}
|
||||
};
|
||||
|
||||
let future = async move {
|
||||
store.read_chunk(&digest)
|
||||
.await
|
||||
.map(move |x| (store, x))
|
||||
};
|
||||
|
||||
this.state = AsyncIndexReaderState::WaitForData(future.boxed());
|
||||
}
|
||||
AsyncIndexReaderState::WaitForData(ref mut future) => {
|
||||
match ready!(future.as_mut().poll(cx)) {
|
||||
Ok((store, mut chunk_data)) => {
|
||||
this.read_buffer.clear();
|
||||
this.read_buffer.append(&mut chunk_data);
|
||||
this.state = AsyncIndexReaderState::HaveData(0);
|
||||
this.store = Some(store);
|
||||
}
|
||||
Err(err) => {
|
||||
return Poll::Ready(Err(io_err_other(err)));
|
||||
}
|
||||
};
|
||||
}
|
||||
AsyncIndexReaderState::HaveData(offset) => {
|
||||
let offset = *offset;
|
||||
let len = this.read_buffer.len();
|
||||
let n = if len - offset < buf.len() {
|
||||
len - offset
|
||||
} else {
|
||||
buf.len()
|
||||
};
|
||||
|
||||
buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
|
||||
if offset + n == len {
|
||||
this.state = AsyncIndexReaderState::NoData;
|
||||
this.current_chunk_idx += 1;
|
||||
} else {
|
||||
this.state = AsyncIndexReaderState::HaveData(offset + n);
|
||||
}
|
||||
|
||||
return Poll::Ready(Ok(n));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
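
The HaveData/NoData handling above boils down to: serve as many bytes as the current chunk buffer still holds, advance an offset into it, and move on to the next chunk once the buffer is drained. A synchronous, std-only analogue of that bookkeeping is sketched below (illustration only; the real reader resolves chunks asynchronously by digest).

use std::io::Read;

// Serve reads from a per-chunk buffer, track an offset into it, and move to
// the next chunk once the buffer is exhausted.
struct ChunkedReader {
    chunks: Vec<Vec<u8>>,
    current: usize,
    offset: usize,
}

impl Read for ChunkedReader {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        loop {
            let chunk = match self.chunks.get(self.current) {
                Some(chunk) => chunk,
                None => return Ok(0), // past the last chunk -> EOF
            };
            let remaining = chunk.len() - self.offset;
            if remaining == 0 {
                // current chunk drained, switch to the next one
                self.current += 1;
                self.offset = 0;
                continue;
            }
            let n = remaining.min(buf.len());
            buf[..n].copy_from_slice(&chunk[self.offset..self.offset + n]);
            self.offset += n;
            return Ok(n);
        }
    }
}

fn main() -> std::io::Result<()> {
    let mut reader = ChunkedReader {
        chunks: vec![b"hello ".to_vec(), b"world".to_vec()],
        current: 0,
        offset: 0,
    };
    let mut out = Vec::new();
    reader.read_to_end(&mut out)?;
    assert_eq!(out, b"hello world".to_vec());
    Ok(())
}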
|
@ -59,17 +59,6 @@ impl BackupGroup {
|
||||
&self.backup_id
|
||||
}
|
||||
|
||||
pub fn parse(path: &str) -> Result<Self, Error> {
|
||||
|
||||
let cap = GROUP_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||
|
||||
Ok(Self {
|
||||
backup_type: cap.get(1).unwrap().as_str().to_owned(),
|
||||
backup_id: cap.get(2).unwrap().as_str().to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn group_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = PathBuf::new();
|
||||
@ -152,6 +141,31 @@ impl BackupGroup {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BackupGroup {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.backup_type();
|
||||
let id = self.backup_id();
|
||||
write!(f, "{}/{}", backup_type, id)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for BackupGroup {
|
||||
type Err = Error;
|
||||
|
||||
/// Parse a backup group path
|
||||
///
|
||||
/// This parses strings like `vm/100`.
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
let cap = GROUP_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||
|
||||
Ok(Self {
|
||||
backup_type: cap.get(1).unwrap().as_str().to_owned(),
|
||||
backup_id: cap.get(2).unwrap().as_str().to_owned(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Uniquely identify a Backup (relative to data store)
|
||||
///
|
||||
/// We also call this a backup snapshot.
|
||||
@ -188,16 +202,6 @@ impl BackupDir {
|
||||
self.backup_time
|
||||
}
|
||||
|
||||
pub fn parse(path: &str) -> Result<Self, Error> {
|
||||
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
}
|
||||
|
||||
pub fn relative_path(&self) -> PathBuf {
|
||||
|
||||
let mut relative_path = self.group.group_path();
|
||||
@ -212,6 +216,31 @@ impl BackupDir {
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for BackupDir {
|
||||
type Err = Error;
|
||||
|
||||
/// Parse a snapshot path
|
||||
///
|
||||
/// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
let cap = SNAPSHOT_PATH_REGEX.captures(path)
|
||||
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
|
||||
|
||||
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
|
||||
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
|
||||
Ok(BackupDir::from((group, backup_time.timestamp())))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for BackupDir {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let backup_type = self.group.backup_type();
|
||||
let id = self.group.backup_id();
|
||||
let time = Self::backup_time_to_string(self.backup_time);
|
||||
write!(f, "{}/{}/{}", backup_type, id, time)
|
||||
}
|
||||
}
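
Display and FromStr now form a round trip over paths like `host/elsa/2020-06-15T05:18:33Z`. A simplified, std-only stand-in is sketched below; it only splits on '/', whereas the real code validates with SNAPSHOT_PATH_REGEX and parses the timestamp with chrono, so the types and names here are illustrative only.

// Simplified stand-in for the parse/format round trip shown above.
#[derive(Debug, PartialEq)]
struct SnapshotPath {
    backup_type: String,
    backup_id: String,
    backup_time: String, // RFC 3339 timestamp, e.g. 2020-06-15T05:18:33Z
}

impl std::str::FromStr for SnapshotPath {
    type Err = String;

    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let mut parts = path.splitn(3, '/');
        match (parts.next(), parts.next(), parts.next()) {
            (Some(ty), Some(id), Some(time)) if !ty.is_empty() && !id.is_empty() && !time.is_empty() => {
                Ok(SnapshotPath {
                    backup_type: ty.to_string(),
                    backup_id: id.to_string(),
                    backup_time: time.to_string(),
                })
            }
            _ => Err(format!("unable to parse backup snapshot path '{}'", path)),
        }
    }
}

impl std::fmt::Display for SnapshotPath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}/{}/{}", self.backup_type, self.backup_id, self.backup_time)
    }
}

fn main() {
    let snap: SnapshotPath = "host/elsa/2020-06-15T05:18:33Z".parse().unwrap();
    assert_eq!(snap.backup_id, "elsa");
    assert_eq!(snap.to_string(), "host/elsa/2020-06-15T05:18:33Z");
}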
|
||||
|
||||
impl From<(BackupGroup, i64)> for BackupDir {
|
||||
fn from((group, timestamp): (BackupGroup, i64)) -> Self {
|
||||
Self { group, backup_time: Utc.timestamp(timestamp, 0) }
|
||||
|
@ -1,23 +1,21 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use std::fmt;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write, Seek, SeekFrom};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::offset::{TimeZone, Local};
|
||||
|
||||
use pathpatterns::{MatchList, MatchType};
|
||||
use proxmox::tools::io::ReadExt;
|
||||
use proxmox::sys::error::io_err_other;
|
||||
|
||||
use crate::pxar::catalog::BackupCatalogWriter;
|
||||
use crate::pxar::{MatchPattern, MatchPatternSlice, MatchType};
|
||||
use crate::backup::file_formats::PROXMOX_CATALOG_FILE_MAGIC_1_0;
|
||||
use crate::tools::runtime::block_on;
|
||||
use crate::pxar::catalog::BackupCatalogWriter;
|
||||
|
||||
#[repr(u8)]
|
||||
#[derive(Copy,Clone,PartialEq)]
|
||||
enum CatalogEntryType {
|
||||
pub(crate) enum CatalogEntryType {
|
||||
Directory = b'd',
|
||||
File = b'f',
|
||||
Symlink = b'l',
|
||||
@ -46,6 +44,21 @@ impl TryFrom<u8> for CatalogEntryType {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&DirEntryAttribute> for CatalogEntryType {
|
||||
fn from(value: &DirEntryAttribute) -> Self {
|
||||
match value {
|
||||
DirEntryAttribute::Directory { .. } => CatalogEntryType::Directory,
|
||||
DirEntryAttribute::File { .. } => CatalogEntryType::File,
|
||||
DirEntryAttribute::Symlink => CatalogEntryType::Symlink,
|
||||
DirEntryAttribute::Hardlink => CatalogEntryType::Hardlink,
|
||||
DirEntryAttribute::BlockDevice => CatalogEntryType::BlockDevice,
|
||||
DirEntryAttribute::CharDevice => CatalogEntryType::CharDevice,
|
||||
DirEntryAttribute::Fifo => CatalogEntryType::Fifo,
|
||||
DirEntryAttribute::Socket => CatalogEntryType::Socket,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CatalogEntryType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", char::from(*self as u8))
|
||||
@ -63,7 +76,7 @@ pub struct DirEntry {
|
||||
}
|
||||
|
||||
/// Used to specify additional attributes inside DirEntry
|
||||
#[derive(Clone, PartialEq)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum DirEntryAttribute {
|
||||
Directory { start: u64 },
|
||||
File { size: u64, mtime: u64 },
|
||||
@ -106,6 +119,23 @@ impl DirEntry {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get file mode bits for this entry to be used with the `MatchList` api.
|
||||
pub fn get_file_mode(&self) -> Option<u32> {
|
||||
Some(
|
||||
match self.attr {
|
||||
DirEntryAttribute::Directory { .. } => pxar::mode::IFDIR,
|
||||
DirEntryAttribute::File { .. } => pxar::mode::IFREG,
|
||||
DirEntryAttribute::Symlink => pxar::mode::IFLNK,
|
||||
DirEntryAttribute::Hardlink => return None,
|
||||
DirEntryAttribute::BlockDevice => pxar::mode::IFBLK,
|
||||
DirEntryAttribute::CharDevice => pxar::mode::IFCHR,
|
||||
DirEntryAttribute::Fifo => pxar::mode::IFIFO,
|
||||
DirEntryAttribute::Socket => pxar::mode::IFSOCK,
|
||||
}
|
||||
as u32
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if DirEntry is a directory
|
||||
pub fn is_directory(&self) -> bool {
|
||||
match self.attr {
|
||||
@ -383,32 +413,6 @@ impl <W: Write> BackupCatalogWriter for CatalogWriter<W> {
|
||||
}
|
||||
}
|
||||
|
||||
// fixme: move to somewhere else?
|
||||
/// Implement Write to tokio mpsc channel Sender
|
||||
pub struct SenderWriter(tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>);
|
||||
|
||||
impl SenderWriter {
|
||||
pub fn new(sender: tokio::sync::mpsc::Sender<Result<Vec<u8>, Error>>) -> Self {
|
||||
Self(sender)
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for SenderWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
|
||||
block_on(async move {
|
||||
self.0
|
||||
.send(Ok(buf.to_vec()))
|
||||
.await
|
||||
.map_err(io_err_other)
|
||||
.and(Ok(buf.len()))
|
||||
})
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Read Catalog files
|
||||
pub struct CatalogReader<R> {
|
||||
reader: R,
|
||||
@ -476,7 +480,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
&mut self,
|
||||
parent: &DirEntry,
|
||||
filename: &[u8],
|
||||
) -> Result<DirEntry, Error> {
|
||||
) -> Result<Option<DirEntry>, Error> {
|
||||
|
||||
let start = match parent.attr {
|
||||
DirEntryAttribute::Directory { start } => start,
|
||||
@ -496,10 +500,7 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
Ok(false) // stop parsing
|
||||
})?;
|
||||
|
||||
match item {
|
||||
None => bail!("no such file"),
|
||||
Some(entry) => Ok(entry),
|
||||
}
|
||||
Ok(item)
|
||||
}
|
||||
|
||||
/// Read the raw directory info block from current reader position.
|
||||
@ -532,7 +533,10 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
self.dump_dir(&path, pos)?;
|
||||
}
|
||||
CatalogEntryType::File => {
|
||||
let dt = Local.timestamp(mtime as i64, 0);
|
||||
let dt = Local
|
||||
.timestamp_opt(mtime as i64, 0)
|
||||
.single() // chrono docs say timestamp_opt can only be None or Single!
|
||||
.unwrap_or_else(|| Local.timestamp(0, 0));
|
||||
|
||||
println!(
|
||||
"{} {:?} {} {}",
|
||||
@ -555,38 +559,30 @@ impl <R: Read + Seek> CatalogReader<R> {
|
||||
/// provided callback on them.
|
||||
pub fn find(
|
||||
&mut self,
|
||||
mut entry: &mut Vec<DirEntry>,
|
||||
pattern: &[MatchPatternSlice],
|
||||
callback: &Box<fn(&[DirEntry])>,
|
||||
parent: &DirEntry,
|
||||
file_path: &mut Vec<u8>,
|
||||
match_list: &impl MatchList, //&[MatchEntry],
|
||||
callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
|
||||
) -> Result<(), Error> {
|
||||
let parent = entry.last().unwrap();
|
||||
if !parent.is_directory() {
|
||||
return Ok(())
|
||||
}
|
||||
|
||||
let file_len = file_path.len();
|
||||
for e in self.read_dir(parent)? {
|
||||
match MatchPatternSlice::match_filename_include(
|
||||
&CString::new(e.name.clone())?,
|
||||
e.is_directory(),
|
||||
pattern,
|
||||
)? {
|
||||
(MatchType::Positive, _) => {
|
||||
entry.push(e);
|
||||
callback(&entry);
|
||||
let pattern = MatchPattern::from_line(b"**/*").unwrap().unwrap();
|
||||
let child_pattern = vec![pattern.as_slice()];
|
||||
self.find(&mut entry, &child_pattern, callback)?;
|
||||
entry.pop();
|
||||
}
|
||||
(MatchType::PartialPositive, child_pattern)
|
||||
| (MatchType::PartialNegative, child_pattern) => {
|
||||
entry.push(e);
|
||||
self.find(&mut entry, &child_pattern, callback)?;
|
||||
entry.pop();
|
||||
}
|
||||
_ => {}
|
||||
let is_dir = e.is_directory();
|
||||
file_path.truncate(file_len);
|
||||
if !e.name.starts_with(b"/") {
|
||||
file_path.reserve(e.name.len() + 1);
|
||||
file_path.push(b'/');
|
||||
}
|
||||
file_path.extend(&e.name);
|
||||
match match_list.matches(&file_path, e.get_file_mode()) {
|
||||
Some(MatchType::Exclude) => continue,
|
||||
Some(MatchType::Include) => callback(&file_path)?,
|
||||
None => (),
|
||||
}
|
||||
if is_dir {
|
||||
self.find(&e, file_path, match_list, callback)?;
|
||||
}
|
||||
}
|
||||
file_path.truncate(file_len);
|
||||
|
||||
Ok(())
|
||||
}
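
The rewritten `find` keeps a single growable path buffer: truncate back to the parent's length before each entry, append '/name', hand the full path to the callback, and recurse into directories. A stand-alone sketch of that pattern over an in-memory tree (types here are illustrative, not the catalog's):

// One shared byte buffer, truncated back to its previous length before each
// sibling, with the callback receiving the full path.
enum Node {
    File(&'static str),
    Dir(&'static str, Vec<Node>),
}

fn walk(
    node: &Node,
    file_path: &mut Vec<u8>,
    callback: &mut dyn FnMut(&[u8]),
) {
    let children = match node {
        Node::File(_) => return,
        Node::Dir(_, children) => children,
    };

    let file_len = file_path.len();
    for child in children {
        let name = match child {
            Node::File(name) | Node::Dir(name, _) => name,
        };
        file_path.truncate(file_len);
        file_path.push(b'/');
        file_path.extend_from_slice(name.as_bytes());

        callback(file_path.as_slice());

        if let Node::Dir(..) = child {
            walk(child, file_path, callback);
        }
    }
    file_path.truncate(file_len);
}

fn main() {
    let root = Node::Dir("", vec![
        Node::File("etc"),
        Node::Dir("var", vec![Node::File("log")]),
    ]);
    let mut paths = Vec::new();
    walk(&root, &mut Vec::new(), &mut |p| paths.push(String::from_utf8_lossy(p).into_owned()));
    assert_eq!(paths, vec!["/etc", "/var", "/var/log"]);
}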
|
||||
|
File diff suppressed because it is too large
@ -429,6 +429,10 @@ impl ChunkStore {
|
||||
full_path
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
pub fn base_path(&self) -> PathBuf {
|
||||
self.base.clone()
|
||||
}
|
||||
|
@ -6,12 +6,30 @@
|
||||
//! See the Wikipedia article for [Authenticated
|
||||
//! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
|
||||
//! for a short introduction.
|
||||
use anyhow::{bail, Error};
|
||||
use openssl::pkcs5::pbkdf2_hmac;
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
|
||||
|
||||
use std::io::Write;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::pkcs5::pbkdf2_hmac;
|
||||
use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
|
||||
#[api(default: "encrypt")]
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
|
||||
pub enum CryptMode {
|
||||
/// Don't encrypt.
|
||||
None,
|
||||
/// Encrypt.
|
||||
Encrypt,
|
||||
/// Only sign.
|
||||
SignOnly,
|
||||
}
|
||||
|
||||
/// Encryption Configuration with secret key
|
||||
///
|
||||
@ -26,7 +44,6 @@ pub struct CryptConfig {
|
||||
id_pkey: openssl::pkey::PKey<openssl::pkey::Private>,
|
||||
// The private key used by the cipher.
|
||||
enc_key: [u8; 32],
|
||||
|
||||
}
|
||||
|
||||
impl CryptConfig {
|
||||
@ -63,10 +80,9 @@ impl CryptConfig {
|
||||
/// chunk digest values do not clash with values computed for
|
||||
/// other secret keys.
|
||||
pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
|
||||
// FIXME: use HMAC-SHA256 instead??
|
||||
let mut hasher = openssl::sha::Sha256::new();
|
||||
hasher.update(&self.id_key);
|
||||
hasher.update(data);
|
||||
hasher.update(&self.id_key); // at the end, to avoid length extensions attacks
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
@ -203,7 +219,7 @@ impl CryptConfig {
|
||||
created: DateTime<Local>,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
|
||||
let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
|
||||
|
||||
|
@ -3,10 +3,10 @@ use std::convert::TryInto;
|
||||
|
||||
use proxmox::tools::io::{ReadExt, WriteExt};
|
||||
|
||||
const MAX_BLOB_SIZE: usize = 128*1024*1024;
|
||||
|
||||
use super::file_formats::*;
|
||||
use super::CryptConfig;
|
||||
use super::{CryptConfig, CryptMode};
|
||||
|
||||
const MAX_BLOB_SIZE: usize = 128*1024*1024;
|
||||
|
||||
/// Encoded data chunk with digest and positional information
|
||||
pub struct ChunkInfo {
|
||||
@ -166,8 +166,21 @@ impl DataBlob {
|
||||
Ok(blob)
|
||||
}
|
||||
|
||||
/// Get the encryption mode for this blob.
|
||||
pub fn crypt_mode(&self) -> Result<CryptMode, Error> {
|
||||
let magic = self.magic();
|
||||
|
||||
Ok(if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 || magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
CryptMode::None
|
||||
} else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
|
||||
CryptMode::Encrypt
|
||||
} else {
|
||||
bail!("Invalid blob magic number.");
|
||||
})
|
||||
}
|
||||
|
||||
/// Decode blob data
|
||||
pub fn decode(self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
|
||||
pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let magic = self.magic();
|
||||
|
||||
@ -194,75 +207,11 @@ impl DataBlob {
|
||||
} else {
|
||||
bail!("unable to decrypt blob - missing CryptConfig");
|
||||
}
|
||||
} else if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 || magic == &AUTHENTICATED_BLOB_MAGIC_1_0 {
|
||||
let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
let head = unsafe {
|
||||
(&self.raw_data[..header_len]).read_le_value::<AuthenticatedDataBlobHeader>()?
|
||||
};
|
||||
|
||||
let data_start = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
|
||||
// Note: only verify if we have a crypt config
|
||||
if let Some(config) = config {
|
||||
let signature = config.compute_auth_tag(&self.raw_data[data_start..]);
|
||||
if signature != head.tag {
|
||||
bail!("verifying blob signature failed");
|
||||
}
|
||||
}
|
||||
|
||||
if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 {
|
||||
let data = zstd::block::decompress(&self.raw_data[data_start..], 16*1024*1024)?;
|
||||
Ok(data)
|
||||
} else {
|
||||
Ok(self.raw_data[data_start..].to_vec())
|
||||
}
|
||||
} else {
|
||||
bail!("Invalid blob magic number.");
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a signed DataBlob, optionally compressed
|
||||
pub fn create_signed(
|
||||
data: &[u8],
|
||||
config: &CryptConfig,
|
||||
compress: bool,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
if data.len() > MAX_BLOB_SIZE {
|
||||
bail!("data blob too large ({} bytes).", data.len());
|
||||
}
|
||||
|
||||
let compr_data;
|
||||
let (_compress, data, magic) = if compress {
|
||||
compr_data = zstd::block::compress(data, 1)?;
|
||||
// Note: We only use compression if result is shorter
|
||||
if compr_data.len() < data.len() {
|
||||
(true, &compr_data[..], AUTH_COMPR_BLOB_MAGIC_1_0)
|
||||
} else {
|
||||
(false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
|
||||
}
|
||||
} else {
|
||||
(false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
|
||||
};
|
||||
|
||||
let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
let mut raw_data = Vec::with_capacity(data.len() + header_len);
|
||||
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic, crc: [0; 4] },
|
||||
tag: config.compute_auth_tag(data),
|
||||
};
|
||||
unsafe {
|
||||
raw_data.write_le_value(head)?;
|
||||
}
|
||||
raw_data.extend_from_slice(data);
|
||||
|
||||
let mut blob = DataBlob { raw_data };
|
||||
blob.set_crc(blob.compute_crc());
|
||||
|
||||
Ok(blob)
|
||||
}
|
||||
|
||||
/// Load blob from ``reader``
|
||||
pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
|
||||
|
||||
@ -294,14 +243,6 @@ impl DataBlob {
|
||||
|
||||
let blob = DataBlob { raw_data: data };
|
||||
|
||||
Ok(blob)
|
||||
} else if magic == AUTH_COMPR_BLOB_MAGIC_1_0 || magic == AUTHENTICATED_BLOB_MAGIC_1_0 {
|
||||
if data.len() < std::mem::size_of::<AuthenticatedDataBlobHeader>() {
|
||||
bail!("authenticated blob too small ({} bytes).", data.len());
|
||||
}
|
||||
|
||||
let blob = DataBlob { raw_data: data };
|
||||
|
||||
Ok(blob)
|
||||
} else {
|
||||
bail!("unable to parse raw blob - wrong magic");
|
||||
@ -311,7 +252,9 @@ impl DataBlob {
|
||||
/// Verify digest and data length for unencrypted chunks.
|
||||
///
|
||||
/// To do that, we need to decompress data first. Please note that
|
||||
/// this is not possible for encrypted chunks.
|
||||
/// this is not possible for encrypted chunks. This function simply returns Ok
|
||||
/// for encrypted chunks.
|
||||
/// Note: This does not call verify_crc
|
||||
pub fn verify_unencrypted(
|
||||
&self,
|
||||
expected_chunk_size: usize,
|
||||
@ -320,22 +263,18 @@ impl DataBlob {
|
||||
|
||||
        let magic = self.magic();

        let verify_raw_data = |data: &[u8]| {
            if expected_chunk_size != data.len() {
                bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
            }
            let digest = openssl::sha::sha256(data);
            if &digest != expected_digest {
                bail!("detected chunk with wrong digest.");
            }
            Ok(())
        };
        if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
            return Ok(());
        }

        if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
            let data = zstd::block::decompress(&self.raw_data[12..], 16*1024*1024)?;
            verify_raw_data(&data)?;
        } else if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
            verify_raw_data(&self.raw_data[12..])?;
        let data = self.decode(None)?;

        if expected_chunk_size != data.len() {
            bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
        }
        let digest = openssl::sha::sha256(&data);
        if &digest != expected_digest {
            bail!("detected chunk with wrong digest.");
        }

        Ok(())
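For reference, a caller-side sketch of how verify_crc() and verify_unencrypted() combine; this mirrors the DataStore::verify_stored_chunk() helper added later in this diff.

// Sketch only; `blob`, `digest` and `expected_size` are provided by the caller.
fn check_chunk(blob: &DataBlob, digest: &[u8; 32], expected_size: u64) -> Result<(), Error> {
    blob.verify_crc()?;                                      // header CRC
    blob.verify_unencrypted(expected_size as usize, digest)  // length + digest (no-op for encrypted blobs)
}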
@ -378,7 +317,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
|
||||
|
||||
/// Set encryption Configuration
|
||||
///
|
||||
/// If set, chunks are encrypted.
|
||||
/// If set, chunks are encrypted
|
||||
pub fn crypt_config(mut self, value: &'b CryptConfig) -> Self {
|
||||
if self.digest_computed {
|
||||
panic!("unable to set crypt_config after compute_digest().");
|
||||
@ -417,12 +356,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
|
||||
self.compute_digest();
|
||||
}
|
||||
|
||||
let chunk = DataBlob::encode(
|
||||
self.orig_data,
|
||||
self.config,
|
||||
self.compress,
|
||||
)?;
|
||||
|
||||
let chunk = DataBlob::encode(self.orig_data, self.config, self.compress)?;
|
||||
Ok((chunk, self.digest))
|
||||
}
|
||||
|
||||
|
@@ -8,8 +8,6 @@ use super::*;
enum BlobReaderState<R: Read> {
    Uncompressed { expected_crc: u32, csum_reader: ChecksumReader<R> },
    Compressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
    Signed { expected_crc: u32, expected_hmac: [u8; 32], csum_reader: ChecksumReader<R> },
    SignedCompressed { expected_crc: u32, expected_hmac: [u8; 32], decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
    Encrypted { expected_crc: u32, decrypt_reader: CryptReader<BufReader<ChecksumReader<R>>> },
    EncryptedCompressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<CryptReader<BufReader<ChecksumReader<R>>>>> },
}
@@ -19,6 +17,10 @@ pub struct DataBlobReader<R: Read> {
    state: BlobReaderState<R>,
}

// zstd_safe::DCtx is not sync but we are, since
// the only public interface is on mutable reference
unsafe impl<R: Read> Sync for DataBlobReader<R> {}

impl <R: Read> DataBlobReader<R> {

    pub fn new(mut reader: R, config: Option<Arc<CryptConfig>>) -> Result<Self, Error> {
@@ -37,22 +39,6 @@ impl <R: Read> DataBlobReader<R> {
let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
|
||||
Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
|
||||
}
|
||||
AUTHENTICATED_BLOB_MAGIC_1_0 => {
|
||||
let expected_crc = u32::from_le_bytes(head.crc);
|
||||
let mut expected_hmac = [0u8; 32];
|
||||
reader.read_exact(&mut expected_hmac)?;
|
||||
let csum_reader = ChecksumReader::new(reader, config);
|
||||
Ok(Self { state: BlobReaderState::Signed { expected_crc, expected_hmac, csum_reader }})
|
||||
}
|
||||
AUTH_COMPR_BLOB_MAGIC_1_0 => {
|
||||
let expected_crc = u32::from_le_bytes(head.crc);
|
||||
let mut expected_hmac = [0u8; 32];
|
||||
reader.read_exact(&mut expected_hmac)?;
|
||||
let csum_reader = ChecksumReader::new(reader, config);
|
||||
|
||||
let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
|
||||
Ok(Self { state: BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr }})
|
||||
}
|
||||
ENCRYPTED_BLOB_MAGIC_1_0 => {
|
||||
let expected_crc = u32::from_le_bytes(head.crc);
|
||||
let mut iv = [0u8; 16];
|
||||
@ -95,31 +81,6 @@ impl <R: Read> DataBlobReader<R> {
|
||||
}
|
||||
Ok(reader)
|
||||
}
|
||||
BlobReaderState::Signed { csum_reader, expected_crc, expected_hmac } => {
|
||||
let (reader, crc, hmac) = csum_reader.finish()?;
|
||||
if crc != expected_crc {
|
||||
bail!("blob crc check failed");
|
||||
}
|
||||
if let Some(hmac) = hmac {
|
||||
if hmac != expected_hmac {
|
||||
bail!("blob signature check failed");
|
||||
}
|
||||
}
|
||||
Ok(reader)
|
||||
}
|
||||
BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr } => {
|
||||
let csum_reader = decompr.finish().into_inner();
|
||||
let (reader, crc, hmac) = csum_reader.finish()?;
|
||||
if crc != expected_crc {
|
||||
bail!("blob crc check failed");
|
||||
}
|
||||
if let Some(hmac) = hmac {
|
||||
if hmac != expected_hmac {
|
||||
bail!("blob signature check failed");
|
||||
}
|
||||
}
|
||||
Ok(reader)
|
||||
}
|
||||
BlobReaderState::Encrypted { expected_crc, decrypt_reader } => {
|
||||
let csum_reader = decrypt_reader.finish()?.into_inner();
|
||||
let (reader, crc, _) = csum_reader.finish()?;
|
||||
@ -151,12 +112,6 @@ impl <R: Read> Read for DataBlobReader<R> {
|
||||
BlobReaderState::Compressed { decompr, .. } => {
|
||||
decompr.read(buf)
|
||||
}
|
||||
BlobReaderState::Signed { csum_reader, .. } => {
|
||||
csum_reader.read(buf)
|
||||
}
|
||||
BlobReaderState::SignedCompressed { decompr, .. } => {
|
||||
decompr.read(buf)
|
||||
}
|
||||
BlobReaderState::Encrypted { decrypt_reader, .. } => {
|
||||
decrypt_reader.read(buf)
|
||||
}
|
||||
|
@ -8,8 +8,6 @@ use super::*;
|
||||
enum BlobWriterState<W: Write> {
|
||||
Uncompressed { csum_writer: ChecksumWriter<W> },
|
||||
Compressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
|
||||
Signed { csum_writer: ChecksumWriter<W> },
|
||||
SignedCompressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
|
||||
Encrypted { crypt_writer: CryptWriter<ChecksumWriter<W>> },
|
||||
EncryptedCompressed { compr: zstd::stream::write::Encoder<CryptWriter<ChecksumWriter<W>>> },
|
||||
}
|
||||
@ -42,33 +40,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
|
||||
Ok(Self { state: BlobWriterState::Compressed { compr }})
|
||||
}
|
||||
|
||||
pub fn new_signed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
|
||||
writer.seek(SeekFrom::Start(0))?;
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: [0; 4] },
|
||||
tag: [0u8; 32],
|
||||
};
|
||||
unsafe {
|
||||
writer.write_le_value(head)?;
|
||||
}
|
||||
let csum_writer = ChecksumWriter::new(writer, Some(config));
|
||||
Ok(Self { state: BlobWriterState::Signed { csum_writer }})
|
||||
}
|
||||
|
||||
pub fn new_signed_compressed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
|
||||
writer.seek(SeekFrom::Start(0))?;
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: [0; 4] },
|
||||
tag: [0u8; 32],
|
||||
};
|
||||
unsafe {
|
||||
writer.write_le_value(head)?;
|
||||
}
|
||||
let csum_writer = ChecksumWriter::new(writer, Some(config));
|
||||
let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?;
|
||||
Ok(Self { state: BlobWriterState::SignedCompressed { compr }})
|
||||
}
|
||||
|
||||
pub fn new_encrypted(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
|
||||
writer.seek(SeekFrom::Start(0))?;
|
||||
let head = EncryptedDataBlobHeader {
|
||||
@ -129,37 +100,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
|
||||
|
||||
Ok(writer)
|
||||
}
|
||||
BlobWriterState::Signed { csum_writer } => {
|
||||
let (mut writer, crc, tag) = csum_writer.finish()?;
|
||||
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
|
||||
tag: tag.unwrap(),
|
||||
};
|
||||
|
||||
writer.seek(SeekFrom::Start(0))?;
|
||||
unsafe {
|
||||
writer.write_le_value(head)?;
|
||||
}
|
||||
|
||||
Ok(writer)
|
||||
}
|
||||
BlobWriterState::SignedCompressed { compr } => {
|
||||
let csum_writer = compr.finish()?;
|
||||
let (mut writer, crc, tag) = csum_writer.finish()?;
|
||||
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
|
||||
tag: tag.unwrap(),
|
||||
};
|
||||
|
||||
writer.seek(SeekFrom::Start(0))?;
|
||||
unsafe {
|
||||
writer.write_le_value(head)?;
|
||||
}
|
||||
|
||||
Ok(writer)
|
||||
}
|
||||
BlobWriterState::Encrypted { crypt_writer } => {
|
||||
let (csum_writer, iv, tag) = crypt_writer.finish()?;
|
||||
let (mut writer, crc, _) = csum_writer.finish()?;
|
||||
@ -203,12 +143,6 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
|
||||
BlobWriterState::Compressed { ref mut compr } => {
|
||||
compr.write(buf)
|
||||
}
|
||||
BlobWriterState::Signed { ref mut csum_writer } => {
|
||||
csum_writer.write(buf)
|
||||
}
|
||||
BlobWriterState::SignedCompressed { ref mut compr } => {
|
||||
compr.write(buf)
|
||||
}
|
||||
BlobWriterState::Encrypted { ref mut crypt_writer } => {
|
||||
crypt_writer.write(buf)
|
||||
}
|
||||
@ -226,13 +160,7 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
|
||||
BlobWriterState::Compressed { ref mut compr } => {
|
||||
compr.flush()
|
||||
}
|
||||
BlobWriterState::Signed { ref mut csum_writer } => {
|
||||
csum_writer.flush()
|
||||
}
|
||||
BlobWriterState::SignedCompressed { ref mut compr } => {
|
||||
compr.flush()
|
||||
}
|
||||
BlobWriterState::Encrypted { ref mut crypt_writer } => {
|
||||
crypt_writer.flush()
|
||||
}
|
||||
BlobWriterState::EncryptedCompressed { ref mut compr } => {
|
||||
|
@ -2,6 +2,7 @@ use std::collections::{HashSet, HashMap};
|
||||
use std::io::{self, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use lazy_static::lazy_static;
|
||||
@ -14,6 +15,7 @@ use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
|
||||
use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
|
||||
use super::index::*;
|
||||
use super::{DataBlob, ArchiveType, archive_type};
|
||||
use crate::backup::CryptMode;
|
||||
use crate::config::datastore;
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools;
|
||||
@ -134,6 +136,10 @@ impl DataStore {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn name(&self) -> &str {
|
||||
self.chunk_store.name()
|
||||
}
|
||||
|
||||
pub fn base_path(&self) -> PathBuf {
|
||||
self.chunk_store.base_path()
|
||||
}
|
||||
@@ -470,4 +476,32 @@ impl DataStore {
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
        let blob = self.chunk_store.read_chunk(digest)?;
        blob.verify_crc()?;
        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
        Ok(())
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
        let raw_size = raw_data.len() as u64;
        let blob = DataBlob::from_raw(raw_data)?;
        Ok((blob, raw_size))
    }

    pub fn load_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<(BackupManifest, CryptMode, u64), Error> {
        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let crypt_mode = blob.crypt_mode()?;
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, crypt_mode, raw_size))
    }
}
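A minimal caller sketch for the new helpers; the function name is illustrative, the API calls are the ones added above.

// Illustrative only; `datastore` and `backup_dir` are assumed to be valid.
fn print_manifest_summary(datastore: &DataStore, backup_dir: &BackupDir) -> Result<(), Error> {
    let (manifest, _crypt_mode, raw_size) = datastore.load_manifest(backup_dir)?;
    println!("manifest blob: {} bytes", raw_size);
    for info in manifest.files() {
        println!("  {} ({} bytes)", info.filename, info.size);
    }
    Ok(())
}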
|
@ -1,23 +1,28 @@
|
||||
use std::convert::TryInto;
|
||||
use std::fs::File;
|
||||
use std::io::{BufWriter, Seek, SeekFrom, Write};
|
||||
use std::io::{self, BufWriter, Seek, SeekFrom, Write};
|
||||
use std::ops::Range;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::Context;
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
use proxmox::tools::io::ReadExt;
|
||||
use proxmox::tools::uuid::Uuid;
|
||||
use proxmox::tools::vec;
|
||||
use proxmox::tools::mmap::Mmap;
|
||||
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
|
||||
|
||||
use super::chunk_stat::ChunkStat;
|
||||
use super::chunk_store::ChunkStore;
|
||||
use super::index::ChunkReadInfo;
|
||||
use super::read_chunk::ReadChunk;
|
||||
use super::Chunker;
|
||||
use super::IndexFile;
|
||||
use super::{DataBlob, DataChunkBuilder};
|
||||
use crate::tools;
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
|
||||
/// Header format definition for dynamic index files (`.dixd`)
|
||||
#[repr(C)]
|
||||
@ -36,34 +41,34 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
|
||||
// pub data: DynamicIndexHeaderData,
|
||||
// }
|
||||
|
||||
#[derive(Clone, Debug)]
#[repr(C)]
pub struct DynamicEntry {
    end_le: u64,
    digest: [u8; 32],
}

impl DynamicEntry {
    #[inline]
    pub fn end(&self) -> u64 {
        u64::from_le(self.end_le)
    }
}
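Each on-disk index entry occupies 40 bytes (the file-size check further down requires a multiple of 40): an 8 byte little-endian end offset followed by a 32 byte chunk digest, matching the #[repr(C)] layout of DynamicEntry. A small decoding sketch under that assumption:

// Illustration only, not part of the diff.
fn parse_dynamic_entry(raw: &[u8; 40]) -> (u64, [u8; 32]) {
    let mut end_bytes = [0u8; 8];
    end_bytes.copy_from_slice(&raw[0..8]);
    let end = u64::from_le_bytes(end_bytes); // same conversion as DynamicEntry::end()

    let mut digest = [0u8; 32];
    digest.copy_from_slice(&raw[8..40]);

    (end, digest)
}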
|
||||
pub struct DynamicIndexReader {
|
||||
_file: File,
|
||||
pub size: usize,
|
||||
index: *const u8,
|
||||
index_entries: usize,
|
||||
index: Mmap<DynamicEntry>,
|
||||
pub uuid: [u8; 16],
|
||||
pub ctime: u64,
|
||||
pub index_csum: [u8; 32],
|
||||
}
|
||||
|
||||
// `index` is mmap()ed which cannot be thread-local so should be sendable
|
||||
// FIXME: Introduce an mmap wrapper type for this?
|
||||
unsafe impl Send for DynamicIndexReader {}
|
||||
unsafe impl Sync for DynamicIndexReader {}
|
||||
|
||||
impl Drop for DynamicIndexReader {
|
||||
fn drop(&mut self) {
|
||||
if let Err(err) = self.unmap() {
|
||||
eprintln!("Unable to unmap dynamic index - {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DynamicIndexReader {
|
||||
pub fn open(path: &Path) -> Result<Self, Error> {
|
||||
File::open(path)
|
||||
.map_err(Error::from)
|
||||
.and_then(|file| Self::new(file))
|
||||
.and_then(Self::new)
|
||||
.map_err(|err| format_err!("Unable to open dynamic index {:?} - {}", path, err))
|
||||
}
|
||||
|
||||
@ -74,6 +79,7 @@ impl DynamicIndexReader {
|
||||
bail!("unable to get shared lock - {}", err);
|
||||
}
|
||||
|
||||
// FIXME: This is NOT OUR job! Check the callers of this method and remove this!
|
||||
file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let header_size = std::mem::size_of::<DynamicIndexHeader>();
|
||||
@ -93,123 +99,49 @@ impl DynamicIndexReader {
|
||||
let size = stat.st_size as usize;
|
||||
|
||||
let index_size = size - header_size;
|
||||
if (index_size % 40) != 0 {
|
||||
let index_count = index_size / 40;
|
||||
if index_count * 40 != index_size {
|
||||
bail!("got unexpected file size");
|
||||
}
|
||||
|
||||
let data = unsafe {
|
||||
nix::sys::mman::mmap(
|
||||
std::ptr::null_mut(),
|
||||
index_size,
|
||||
let index = unsafe {
|
||||
Mmap::map_fd(
|
||||
rawfd,
|
||||
header_size as u64,
|
||||
index_count,
|
||||
nix::sys::mman::ProtFlags::PROT_READ,
|
||||
nix::sys::mman::MapFlags::MAP_PRIVATE,
|
||||
rawfd,
|
||||
header_size as i64,
|
||||
)
|
||||
}? as *const u8;
|
||||
)?
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
_file: file,
|
||||
size,
|
||||
index: data,
|
||||
index_entries: index_size / 40,
|
||||
index,
|
||||
ctime,
|
||||
uuid: header.uuid,
|
||||
index_csum: header.index_csum,
|
||||
})
|
||||
}
|
||||
|
||||
fn unmap(&mut self) -> Result<(), Error> {
|
||||
if self.index == std::ptr::null_mut() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(err) = unsafe {
|
||||
nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, self.index_entries * 40)
|
||||
} {
|
||||
bail!("unmap dynamic index failed - {}", err);
|
||||
}
|
||||
|
||||
self.index = std::ptr::null_mut();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
|
||||
if pos >= self.index_entries {
|
||||
bail!("chunk index out of range");
|
||||
}
|
||||
let start = if pos == 0 {
|
||||
0
|
||||
} else {
|
||||
unsafe { *(self.index.add((pos - 1) * 40) as *const u64) }
|
||||
};
|
||||
|
||||
let end = unsafe { *(self.index.add(pos * 40) as *const u64) };
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(
|
||||
self.index.add(pos * 40 + 8),
|
||||
(*digest.as_mut_ptr()).as_mut_ptr(),
|
||||
32,
|
||||
);
|
||||
}
|
||||
|
||||
Ok((start, end, unsafe { digest.assume_init() }))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
fn chunk_end(&self, pos: usize) -> u64 {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
unsafe { *(self.index.add(pos * 40) as *const u64) }
|
||||
self.index[pos].end()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 40 + 8), 32) };
|
||||
slice.try_into().unwrap()
|
||||
&self.index[pos].digest
|
||||
}
|
||||
|
||||
/// Compute checksum and data size
|
||||
pub fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_entries {
|
||||
chunk_end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
csum.update(&chunk_end.to_le_bytes());
|
||||
csum.update(digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
/*
|
||||
pub fn dump_pxar(&self, mut writer: Box<dyn Write>) -> Result<(), Error> {
|
||||
|
||||
for pos in 0..self.index_entries {
|
||||
let _end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
//println!("Dump {:08x}", end );
|
||||
let chunk = self.store.read_chunk(digest)?;
|
||||
// fimxe: handle encrypted chunks
|
||||
let data = chunk.decode(None)?;
|
||||
writer.write_all(&data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
*/
|
||||
|
||||
// TODO: can we use std::slice::binary_search with Mmap now?
|
||||
fn binary_search(
|
||||
&self,
|
||||
start_idx: usize,
|
||||
@ -238,11 +170,11 @@ impl DynamicIndexReader {
|
||||
|
||||
impl IndexFile for DynamicIndexReader {
|
||||
fn index_count(&self) -> usize {
|
||||
self.index_entries
|
||||
self.index.len()
|
||||
}
|
||||
|
||||
fn index_digest(&self, pos: usize) -> Option<&[u8; 32]> {
|
||||
if pos >= self.index_entries {
|
||||
if pos >= self.index.len() {
|
||||
None
|
||||
} else {
|
||||
Some(unsafe { std::mem::transmute(self.chunk_digest(pos).as_ptr()) })
|
||||
@ -250,12 +182,59 @@ impl IndexFile for DynamicIndexReader {
|
||||
}
|
||||
|
||||
fn index_bytes(&self) -> u64 {
|
||||
if self.index_entries == 0 {
|
||||
if self.index.is_empty() {
|
||||
0
|
||||
} else {
|
||||
self.chunk_end((self.index_entries - 1) as usize)
|
||||
self.chunk_end(self.index.len() - 1)
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_count() {
|
||||
let info = self.chunk_info(pos).unwrap();
|
||||
chunk_end = info.range.end;
|
||||
csum.update(&chunk_end.to_le_bytes());
|
||||
csum.update(&info.digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
|
||||
if pos >= self.index.len() {
|
||||
return None;
|
||||
}
|
||||
let start = if pos == 0 { 0 } else { self.index[pos - 1].end() };
|
||||
|
||||
let end = self.index[pos].end();
|
||||
|
||||
Some(ChunkReadInfo {
|
||||
range: start..end,
|
||||
digest: self.index[pos].digest.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct CachedChunk {
|
||||
range: Range<u64>,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl CachedChunk {
|
||||
/// Perform sanity checks on the range and data size:
|
||||
pub fn new(range: Range<u64>, data: Vec<u8>) -> Result<Self, Error> {
|
||||
if data.len() as u64 != range.end - range.start {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {})",
|
||||
data.len(),
|
||||
range.end - range.start,
|
||||
);
|
||||
}
|
||||
Ok(Self { range, data })
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedDynamicReader<S> {
|
||||
@ -266,7 +245,7 @@ pub struct BufferedDynamicReader<S> {
|
||||
buffered_chunk_idx: usize,
|
||||
buffered_chunk_start: u64,
|
||||
read_offset: u64,
|
||||
lru_cache: crate::tools::lru_cache::LruCache<usize, (u64, u64, Vec<u8>)>,
|
||||
lru_cache: crate::tools::lru_cache::LruCache<usize, CachedChunk>,
|
||||
}
|
||||
|
||||
struct ChunkCacher<'a, S> {
|
||||
@ -274,16 +253,21 @@ struct ChunkCacher<'a, S> {
|
||||
index: &'a DynamicIndexReader,
|
||||
}
|
||||
|
||||
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, (u64, u64, Vec<u8>)> for ChunkCacher<'a, S> {
|
||||
fn fetch(&mut self, index: usize) -> Result<Option<(u64, u64, Vec<u8>)>, anyhow::Error> {
|
||||
let (start, end, digest) = self.index.chunk_info(index)?;
|
||||
self.store.read_chunk(&digest).and_then(|data| Ok(Some((start, end, data))))
|
||||
impl<'a, S: ReadChunk> crate::tools::lru_cache::Cacher<usize, CachedChunk> for ChunkCacher<'a, S> {
|
||||
fn fetch(&mut self, index: usize) -> Result<Option<CachedChunk>, Error> {
|
||||
let info = match self.index.chunk_info(index) {
|
||||
Some(info) => info,
|
||||
None => bail!("chunk index out of range"),
|
||||
};
|
||||
let range = info.range;
|
||||
let data = self.store.read_chunk(&info.digest)?;
|
||||
CachedChunk::new(range, data).map(Some)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
pub fn new(index: DynamicIndexReader, store: S) -> Self {
|
||||
let archive_size = index.chunk_end(index.index_entries - 1);
|
||||
let archive_size = index.index_bytes();
|
||||
Self {
|
||||
store,
|
||||
index,
|
||||
@ -301,7 +285,8 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
}
|
||||
|
||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||
let (start, end, data) = self.lru_cache.access(
|
||||
//let (start, end, data) = self.lru_cache.access(
|
||||
let cached_chunk = self.lru_cache.access(
|
||||
idx,
|
||||
&mut ChunkCacher {
|
||||
store: &mut self.store,
|
||||
@ -309,21 +294,13 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
},
|
||||
)?.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
||||
|
||||
if (*end - *start) != data.len() as u64 {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {}",
|
||||
(*end - *start),
|
||||
data.len()
|
||||
);
|
||||
}
|
||||
|
||||
// fixme: avoid copy
|
||||
self.read_buffer.clear();
|
||||
self.read_buffer.extend_from_slice(&data);
|
||||
self.read_buffer.extend_from_slice(&cached_chunk.data);
|
||||
|
||||
self.buffered_chunk_idx = idx;
|
||||
|
||||
self.buffered_chunk_start = *start;
|
||||
self.buffered_chunk_start = cached_chunk.range.start;
|
||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||
Ok(())
|
||||
}
|
||||
@ -340,7 +317,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
|
||||
|
||||
// optimization for sequential read
|
||||
if buffer_len > 0
|
||||
&& ((self.buffered_chunk_idx + 1) < index.index_entries)
|
||||
&& ((self.buffered_chunk_idx + 1) < index.index.len())
|
||||
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let next_idx = self.buffered_chunk_idx + 1;
|
||||
@ -356,7 +333,7 @@ impl<S: ReadChunk> crate::tools::BufferedRead for BufferedDynamicReader<S> {
|
||||
|| (offset < self.buffered_chunk_start)
|
||||
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
|
||||
{
|
||||
let end_idx = index.index_entries - 1;
|
||||
let end_idx = index.index.len() - 1;
|
||||
let end = index.chunk_end(end_idx);
|
||||
let idx = index.binary_search(0, 0, end_idx, end, offset)?;
|
||||
self.buffer_chunk(idx)?;
|
||||
@ -383,9 +360,7 @@ impl<S: ReadChunk> std::io::Read for BufferedDynamicReader<S> {
|
||||
data.len()
|
||||
};
|
||||
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
|
||||
}
|
||||
buf[0..n].copy_from_slice(&data[0..n]);
|
||||
|
||||
self.read_offset += n as u64;
|
||||
|
||||
@ -417,6 +392,49 @@ impl<S: ReadChunk> std::io::Seek for BufferedDynamicReader<S> {
|
||||
}
|
||||
}
|
||||
|
||||
/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
|
||||
/// async use!
|
||||
///
|
||||
/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
|
||||
/// so that we can properly access it from multiple threads simultaneously while not issuing
|
||||
/// duplicate simultaneous reads over http.
|
||||
#[derive(Clone)]
|
||||
pub struct LocalDynamicReadAt<R: ReadChunk> {
|
||||
inner: Arc<Mutex<BufferedDynamicReader<R>>>,
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> LocalDynamicReadAt<R> {
|
||||
pub fn new(inner: BufferedDynamicReader<R>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(inner)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: ReadChunk> ReadAt for LocalDynamicReadAt<R> {
|
||||
fn start_read_at<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_cx: &mut Context,
|
||||
buf: &'a mut [u8],
|
||||
offset: u64,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
use std::io::Read;
|
||||
MaybeReady::Ready(tokio::task::block_in_place(move || {
|
||||
let mut reader = self.inner.lock().unwrap();
|
||||
reader.seek(SeekFrom::Start(offset))?;
|
||||
Ok(reader.read(buf)?)
|
||||
}))
|
||||
}
|
||||
|
||||
fn poll_complete<'a>(
|
||||
self: Pin<&'a Self>,
|
||||
_op: ReadAtOperation<'a>,
|
||||
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
|
||||
panic!("LocalDynamicReadAt::start_read_at returned Pending");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Create dynamic index files (`.dixd`)
|
||||
pub struct DynamicIndexWriter {
|
||||
store: Arc<ChunkStore>,
|
||||
@ -460,9 +478,7 @@ impl DynamicIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = std::time::SystemTime::now()
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)?
|
||||
.as_secs();
|
||||
let ctime = epoch_now_u64()?;
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
|
@ -17,12 +17,6 @@ pub const ENCRYPTED_BLOB_MAGIC_1_0: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 2
|
||||
// openssl::sha::sha256(b"Proxmox Backup zstd compressed encrypted blob v1.0")[0..8]
|
||||
pub const ENCR_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];
|
||||
|
||||
//openssl::sha::sha256(b"Proxmox Backup authenticated blob v1.0")[0..8]
|
||||
pub const AUTHENTICATED_BLOB_MAGIC_1_0: [u8; 8] = [31, 135, 238, 226, 145, 206, 5, 2];
|
||||
|
||||
//openssl::sha::sha256(b"Proxmox Backup zstd compressed authenticated blob v1.0")[0..8]
|
||||
pub const AUTH_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [126, 166, 15, 190, 145, 31, 169, 96];
|
||||
|
||||
// openssl::sha::sha256(b"Proxmox Backup fixed sized chunk index v1.0")[0..8]
|
||||
pub const FIXED_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [47, 127, 65, 237, 145, 253, 15, 205];
|
||||
|
||||
@ -50,19 +44,6 @@ pub struct DataBlobHeader {
|
||||
pub crc: [u8; 4],
|
||||
}
|
||||
|
||||
/// Authenticated data blob binary storage format
|
||||
///
|
||||
/// The ``DataBlobHeader`` for authenticated blobs additionally contains
|
||||
/// a 16 byte HMAC tag, followed by the data:
|
||||
///
|
||||
/// (MAGIC || CRC32 || TAG || Data).
|
||||
#[derive(Endian)]
|
||||
#[repr(C,packed)]
|
||||
pub struct AuthenticatedDataBlobHeader {
|
||||
pub head: DataBlobHeader,
|
||||
pub tag: [u8; 32],
|
||||
}
|
||||
|
||||
/// Encrypted data blob binary storage format
|
||||
///
|
||||
/// The ``DataBlobHeader`` for encrypted blobs additionally contains
|
||||
@ -87,8 +68,6 @@ pub fn header_size(magic: &[u8; 8]) -> usize {
|
||||
&COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
|
||||
&ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
||||
&ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
|
||||
&AUTHENTICATED_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
|
||||
&AUTH_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
|
||||
_ => panic!("unknown blob magic"),
|
||||
}
|
||||
}
|
||||
|
@ -1,11 +1,10 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use std::convert::TryInto;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
|
||||
use super::chunk_stat::*;
|
||||
use super::chunk_store::*;
|
||||
use super::IndexFile;
|
||||
use crate::tools;
|
||||
use super::{IndexFile, ChunkReadInfo};
|
||||
use crate::tools::{self, epoch_now_u64};
|
||||
|
||||
use chrono::{Local, TimeZone};
|
||||
use std::fs::File;
|
||||
@ -147,38 +146,6 @@ impl FixedIndexReader {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn chunk_info(&self, pos: usize) -> Result<(u64, u64, [u8; 32]), Error> {
|
||||
if pos >= self.index_length {
|
||||
bail!("chunk index out of range");
|
||||
}
|
||||
let start = (pos * self.chunk_size) as u64;
|
||||
let mut end = start + self.chunk_size as u64;
|
||||
|
||||
if end > self.size {
|
||||
end = self.size;
|
||||
}
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
std::ptr::copy_nonoverlapping(
|
||||
self.index.add(pos * 32),
|
||||
(*digest.as_mut_ptr()).as_mut_ptr(),
|
||||
32,
|
||||
);
|
||||
}
|
||||
|
||||
Ok((start, end, unsafe { digest.assume_init() }))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_digest(&self, pos: usize) -> &[u8; 32] {
|
||||
if pos >= self.index_length {
|
||||
panic!("chunk index out of range");
|
||||
}
|
||||
let slice = unsafe { std::slice::from_raw_parts(self.index.add(pos * 32), 32) };
|
||||
slice.try_into().unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chunk_end(&self, pos: usize) -> u64 {
|
||||
if pos >= self.index_length {
|
||||
@ -193,20 +160,6 @@ impl FixedIndexReader {
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute checksum and data size
|
||||
pub fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_length {
|
||||
chunk_end = self.chunk_end(pos);
|
||||
let digest = self.chunk_digest(pos);
|
||||
csum.update(digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
|
||||
pub fn print_info(&self) {
|
||||
println!("Size: {}", self.size);
|
||||
println!("ChunkSize: {}", self.chunk_size);
|
||||
@ -234,6 +187,38 @@ impl IndexFile for FixedIndexReader {
|
||||
fn index_bytes(&self) -> u64 {
|
||||
self.size
|
||||
}
|
||||
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo> {
|
||||
if pos >= self.index_length {
|
||||
return None;
|
||||
}
|
||||
|
||||
let start = (pos * self.chunk_size) as u64;
|
||||
let mut end = start + self.chunk_size as u64;
|
||||
|
||||
if end > self.size {
|
||||
end = self.size;
|
||||
}
|
||||
|
||||
let digest = self.index_digest(pos).unwrap();
|
||||
Some(ChunkReadInfo {
|
||||
range: start..end,
|
||||
digest: *digest,
|
||||
})
|
||||
}
|
||||
|
||||
fn compute_csum(&self) -> ([u8; 32], u64) {
|
||||
let mut csum = openssl::sha::Sha256::new();
|
||||
let mut chunk_end = 0;
|
||||
for pos in 0..self.index_count() {
|
||||
let info = self.chunk_info(pos).unwrap();
|
||||
chunk_end = info.range.end;
|
||||
csum.update(&info.digest);
|
||||
}
|
||||
let csum = csum.finish();
|
||||
|
||||
(csum, chunk_end)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FixedIndexWriter {
|
||||
@ -290,9 +275,7 @@ impl FixedIndexWriter {
|
||||
panic!("got unexpected header size");
|
||||
}
|
||||
|
||||
let ctime = std::time::SystemTime::now()
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)?
|
||||
.as_secs();
|
||||
let ctime = epoch_now_u64()?;
|
||||
|
||||
let uuid = Uuid::generate();
|
||||
|
||||
@ -469,6 +452,18 @@ impl FixedIndexWriter {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clone_data_from(&mut self, reader: &FixedIndexReader) -> Result<(), Error> {
|
||||
if self.index_length != reader.index_count() {
|
||||
bail!("clone_data_from failed - index sizes not equal");
|
||||
}
|
||||
|
||||
for i in 0..self.index_length {
|
||||
self.add_digest(i, reader.index_digest(i).unwrap())?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedFixedReader<S> {
|
||||
@ -501,18 +496,17 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
|
||||
|
||||
fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
|
||||
let index = &self.index;
|
||||
let (start, end, digest) = index.chunk_info(idx)?;
|
||||
let info = match index.chunk_info(idx) {
|
||||
Some(info) => info,
|
||||
None => bail!("chunk index out of range"),
|
||||
};
|
||||
|
||||
// fixme: avoid copy
|
||||
|
||||
let data = self.store.read_chunk(&digest)?;
|
||||
|
||||
if (end - start) != data.len() as u64 {
|
||||
bail!(
|
||||
"read chunk with wrong size ({} != {}",
|
||||
(end - start),
|
||||
data.len()
|
||||
);
|
||||
let data = self.store.read_chunk(&info.digest)?;
|
||||
let size = info.range.end - info.range.start;
|
||||
if size != data.len() as u64 {
|
||||
bail!("read chunk with wrong size ({} != {}", size, data.len());
|
||||
}
|
||||
|
||||
self.read_buffer.clear();
|
||||
@ -520,8 +514,7 @@ impl<S: ReadChunk> BufferedFixedReader<S> {
|
||||
|
||||
self.buffered_chunk_idx = idx;
|
||||
|
||||
self.buffered_chunk_start = start as u64;
|
||||
//println!("BUFFER {} {}", self.buffered_chunk_start, end);
|
||||
self.buffered_chunk_start = info.range.start as u64;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,17 @@
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::ops::Range;
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::*;
|
||||
pub struct ChunkReadInfo {
    pub range: Range<u64>,
    pub digest: [u8; 32],
}

impl ChunkReadInfo {
    #[inline]
    pub fn size(&self) -> u64 {
        self.range.end - self.range.start
    }
}
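With chunk_info() on the IndexFile trait (declared a few hunks below), callers can walk chunk metadata without knowing the raw index layout; a minimal sketch:

// Sketch only; `index` is any type implementing the IndexFile trait.
fn total_indexed_bytes(index: &dyn IndexFile) -> u64 {
    let mut total = 0;
    for pos in 0..index.index_count() {
        if let Some(info) = index.chunk_info(pos) { // None only when pos is out of range
            total += info.size();
        }
    }
    total
}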
|
||||
/// Trait to get digest list from index files
|
||||
///
|
||||
@ -13,6 +20,10 @@ pub trait IndexFile {
|
||||
fn index_count(&self) -> usize;
|
||||
fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
|
||||
fn index_bytes(&self) -> u64;
|
||||
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
|
||||
|
||||
/// Compute index checksum and size
|
||||
fn compute_csum(&self) -> ([u8; 32], u64);
|
||||
|
||||
/// Returns most often used chunks
|
||||
fn find_most_used_chunks(&self, max: usize) -> HashMap<[u8; 32], usize> {
|
||||
@ -46,111 +57,3 @@ pub trait IndexFile {
|
||||
map
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode digest list from an `IndexFile` into a binary stream
///
/// The reader simply returns a binary stream of 32 byte digest values.
pub struct DigestListEncoder {
|
||||
index: Box<dyn IndexFile + Send + Sync>,
|
||||
pos: usize,
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl DigestListEncoder {
|
||||
|
||||
pub fn new(index: Box<dyn IndexFile + Send + Sync>) -> Self {
|
||||
let count = index.index_count();
|
||||
Self { index, pos: 0, count }
|
||||
}
|
||||
}
|
||||
|
||||
impl std::io::Read for DigestListEncoder {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
|
||||
if buf.len() < 32 {
|
||||
panic!("read buffer too small");
|
||||
}
|
||||
|
||||
if self.pos < self.count {
|
||||
let mut written = 0;
|
||||
loop {
|
||||
let digest = self.index.index_digest(self.pos).unwrap();
|
||||
buf[written..(written + 32)].copy_from_slice(digest);
|
||||
self.pos += 1;
|
||||
written += 32;
|
||||
if self.pos >= self.count {
|
||||
break;
|
||||
}
|
||||
if (written + 32) >= buf.len() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(written)
|
||||
} else {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
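Because DigestListEncoder implements std::io::Read and always returns whole 32 byte records, a consumer can simply split the buffer into 32 byte chunks; a hedged sketch:

use std::io::Read;

// Illustration only; `encoder` wraps some IndexFile as shown above.
fn collect_digests(mut encoder: DigestListEncoder) -> Result<Vec<[u8; 32]>, std::io::Error> {
    let mut digests = Vec::new();
    let mut buf = [0u8; 32 * 64]; // read() requires at least 32 bytes
    loop {
        let n = encoder.read(&mut buf)?;
        if n == 0 { break; } // end of digest list
        for chunk in buf[..n].chunks_exact(32) {
            let mut digest = [0u8; 32];
            digest.copy_from_slice(chunk);
            digests.push(digest);
        }
    }
    Ok(digests)
}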
|
||||
/// Decodes a Stream<Item=Bytes> into a Stream<Item=[u8; 32]>
///
/// The decoder simply yields 32 byte digest values.
|
||||
pub struct DigestListDecoder<S: Unpin> {
|
||||
input: S,
|
||||
buffer: BytesMut,
|
||||
}
|
||||
|
||||
impl<S: Unpin> DigestListDecoder<S> {
|
||||
pub fn new(input: S) -> Self {
|
||||
Self { input, buffer: BytesMut::new() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Unpin> Unpin for DigestListDecoder<S> {}
|
||||
|
||||
impl<S: Unpin, E> Stream for DigestListDecoder<S>
|
||||
where
|
||||
S: Stream<Item=Result<Bytes, E>>,
|
||||
E: Into<Error>,
|
||||
{
|
||||
type Item = Result<[u8; 32], Error>;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
if this.buffer.len() >= 32 {
|
||||
let left = this.buffer.split_to(32);
|
||||
|
||||
let mut digest = std::mem::MaybeUninit::<[u8; 32]>::uninit();
|
||||
unsafe {
|
||||
(*digest.as_mut_ptr()).copy_from_slice(&left[..]);
|
||||
return Poll::Ready(Some(Ok(digest.assume_init())));
|
||||
}
|
||||
}
|
||||
|
||||
match Pin::new(&mut this.input).poll_next(cx) {
|
||||
Poll::Pending => {
|
||||
return Poll::Pending;
|
||||
}
|
||||
Poll::Ready(Some(Err(err))) => {
|
||||
return Poll::Ready(Some(Err(err.into())));
|
||||
}
|
||||
Poll::Ready(Some(Ok(data))) => {
|
||||
this.buffer.extend_from_slice(&data);
|
||||
// continue
|
||||
}
|
||||
Poll::Ready(None) => {
|
||||
let rest = this.buffer.len();
|
||||
if rest == 0 {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
return Poll::Ready(Some(Err(format_err!(
|
||||
"got small digest ({} != 32).",
|
||||
rest,
|
||||
))));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use anyhow::{bail, format_err, Context, Error};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use chrono::{Local, TimeZone, DateTime};
|
||||
@ -146,12 +146,26 @@ pub fn encrypt_key_with_passphrase(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn load_and_decrypt_key(path: &std::path::Path, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
pub fn load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
do_load_and_decrypt_key(path, passphrase)
|
||||
.with_context(|| format!("failed to load decryption key from {:?}", path))
|
||||
}
|
||||
|
||||
let raw = file_get_contents(&path)?;
|
||||
let data = String::from_utf8(raw)?;
|
||||
fn do_load_and_decrypt_key(
|
||||
path: &std::path::Path,
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
decrypt_key(&file_get_contents(&path)?, passphrase)
|
||||
}
|
||||
|
||||
let key_config: KeyConfig = serde_json::from_str(&data)?;
|
||||
pub fn decrypt_key(
|
||||
mut keydata: &[u8],
|
||||
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
|
||||
) -> Result<([u8;32], DateTime<Local>), Error> {
|
||||
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
|
||||
|
||||
let raw_data = key_config.data;
|
||||
let created = key_config.created;
|
||||
|
@ -3,21 +3,56 @@ use std::convert::TryFrom;
|
||||
use std::path::Path;
|
||||
|
||||
use serde_json::{json, Value};
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::backup::BackupDir;
|
||||
use crate::backup::{BackupDir, CryptMode, CryptConfig};
|
||||
|
||||
pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
|
||||
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
|
||||
|
||||
mod hex_csum {
|
||||
use serde::{self, Deserialize, Serializer, Deserializer};
|
||||
|
||||
pub fn serialize<S>(
|
||||
csum: &[u8; 32],
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let s = proxmox::tools::digest_to_hex(csum);
|
||||
serializer.serialize_str(&s)
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, D>(
|
||||
deserializer: D,
|
||||
) -> Result<[u8; 32], D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
pub struct FileInfo {
|
||||
pub filename: String,
|
||||
pub crypt_mode: CryptMode,
|
||||
pub size: u64,
|
||||
#[serde(with = "hex_csum")]
|
||||
pub csum: [u8; 32],
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all="kebab-case")]
|
||||
pub struct BackupManifest {
|
||||
snapshot: BackupDir,
|
||||
backup_type: String,
|
||||
backup_id: String,
|
||||
backup_time: i64,
|
||||
files: Vec<FileInfo>,
|
||||
pub unprotected: Value,
|
||||
}
|
||||
|
||||
#[derive(PartialEq)]
|
||||
@ -45,12 +80,18 @@ pub fn archive_type<P: AsRef<Path>>(
|
||||
impl BackupManifest {
|
||||
|
||||
pub fn new(snapshot: BackupDir) -> Self {
|
||||
Self { files: Vec::new(), snapshot }
|
||||
Self {
|
||||
backup_type: snapshot.group().backup_type().into(),
|
||||
backup_id: snapshot.group().backup_id().into(),
|
||||
backup_time: snapshot.backup_time().timestamp(),
|
||||
files: Vec::new(),
|
||||
unprotected: json!({}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32]) -> Result<(), Error> {
|
||||
pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
|
||||
let _archive_type = archive_type(&filename)?; // check type
|
||||
self.files.push(FileInfo { filename, size, csum });
|
||||
self.files.push(FileInfo { filename, size, csum, crypt_mode });
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -83,24 +124,103 @@ impl BackupManifest {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn into_json(self) -> Value {
|
||||
json!({
|
||||
"backup-type": self.snapshot.group().backup_type(),
|
||||
"backup-id": self.snapshot.group().backup_id(),
|
||||
"backup-time": self.snapshot.backup_time().timestamp(),
|
||||
"files": self.files.iter()
|
||||
.fold(Vec::new(), |mut acc, info| {
|
||||
acc.push(json!({
|
||||
"filename": info.filename,
|
||||
"size": info.size,
|
||||
"csum": proxmox::tools::digest_to_hex(&info.csum),
|
||||
}));
|
||||
acc
|
||||
})
|
||||
})
|
||||
// Generate canonical json
|
||||
fn to_canonical_json(value: &Value, output: &mut String) -> Result<(), Error> {
|
||||
match value {
|
||||
Value::Null => bail!("got unexpected null value"),
|
||||
Value::String(_) => {
|
||||
output.push_str(&serde_json::to_string(value)?);
|
||||
},
|
||||
Value::Number(_) => {
|
||||
output.push_str(&serde_json::to_string(value)?);
|
||||
}
|
||||
Value::Bool(_) => {
|
||||
output.push_str(&serde_json::to_string(value)?);
|
||||
},
|
||||
Value::Array(list) => {
|
||||
output.push('[');
|
||||
for (i, item) in list.iter().enumerate() {
|
||||
if i != 0 { output.push(','); }
|
||||
Self::to_canonical_json(item, output)?;
|
||||
}
|
||||
output.push(']');
|
||||
}
|
||||
Value::Object(map) => {
|
||||
output.push('{');
|
||||
let mut keys: Vec<String> = map.keys().map(|s| s.clone()).collect();
|
||||
keys.sort();
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let item = map.get(key).unwrap();
|
||||
if i != 0 { output.push(','); }
|
||||
|
||||
output.push_str(&serde_json::to_string(&Value::String(key.clone()))?);
|
||||
output.push(':');
|
||||
Self::to_canonical_json(item, output)?;
|
||||
}
|
||||
output.push('}');
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
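For reference, a small in-module test sketch (not part of the diff) showing the effect of the canonicalization: object keys are emitted in sorted order at every nesting level, so equal values always serialize to identical bytes.

#[test]
fn canonical_json_orders_keys() -> Result<(), Error> {
    // Field names here are arbitrary; to_canonical_json is private, so this
    // sketch would have to live in the same module.
    let value = serde_json::json!({
        "b": 1,
        "a": { "y": true, "x": "s" }
    });
    let mut canonical = String::new();
    BackupManifest::to_canonical_json(&value, &mut canonical)?;
    assert_eq!(canonical, r#"{"a":{"x":"s","y":true},"b":1}"#);
    Ok(())
}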
|
||||
/// Compute manifest signature
///
/// The signature is a HMAC-SHA256 over the canonical json
/// representation; the 'unprotected' property is excluded.
pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
|
||||
Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
|
||||
}
|
||||
|
||||
fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
|
||||
|
||||
let mut signed_data = data.clone();
|
||||
|
||||
signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
|
||||
|
||||
let mut canonical = String::new();
|
||||
Self::to_canonical_json(&signed_data, &mut canonical)?;
|
||||
|
||||
let sig = crypt_config.compute_auth_tag(canonical.as_bytes());
|
||||
|
||||
Ok(sig)
|
||||
}
|
||||
|
||||
/// Converts the Manifest into json string, and add a signature if there is a crypt_config.
|
||||
pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
|
||||
|
||||
let mut manifest = serde_json::to_value(&self)?;
|
||||
|
||||
if let Some(crypt_config) = crypt_config {
|
||||
let sig = self.signature(crypt_config)?;
|
||||
manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
|
||||
}
|
||||
|
||||
let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
|
||||
Ok(manifest)
|
||||
}
|
||||
|
||||
/// Try to read the manifest. This verifies the signature if there is a crypt_config.
|
||||
pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
|
||||
let json: Value = serde_json::from_slice(data)?;
|
||||
let signature = json["signature"].as_str().map(String::from);
|
||||
|
||||
if let Some(ref crypt_config) = crypt_config {
|
||||
if let Some(signature) = signature {
|
||||
let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
|
||||
if signature != expected_signature {
|
||||
bail!("wrong signature in manifest");
|
||||
}
|
||||
} else {
|
||||
// not signed: warn/fail?
|
||||
}
|
||||
}
|
||||
|
||||
let manifest: BackupManifest = serde_json::from_value(json)?;
|
||||
Ok(manifest)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl TryFrom<super::DataBlob> for BackupManifest {
|
||||
type Error = Error;
|
||||
|
||||
@ -109,40 +229,50 @@ impl TryFrom<super::DataBlob> for BackupManifest {
|
||||
.map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
|
||||
let json: Value = serde_json::from_slice(&data[..])
|
||||
.map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
|
||||
BackupManifest::try_from(json)
|
||||
let manifest: BackupManifest = serde_json::from_value(json)?;
|
||||
Ok(manifest)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Value> for BackupManifest {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(data: Value) -> Result<Self, Error> {
|
||||
#[test]
|
||||
fn test_manifest_signature() -> Result<(), Error> {
|
||||
|
||||
use crate::tools::{required_string_property, required_integer_property, required_array_property};
|
||||
use crate::backup::{KeyDerivationConfig};
|
||||
|
||||
proxmox::try_block!({
|
||||
let backup_type = required_string_property(&data, "backup-type")?;
|
||||
let backup_id = required_string_property(&data, "backup-id")?;
|
||||
let backup_time = required_integer_property(&data, "backup-time")?;
|
||||
let pw = b"test";
|
||||
|
||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
||||
let kdf = KeyDerivationConfig::Scrypt {
|
||||
n: 65536,
|
||||
r: 8,
|
||||
p: 1,
|
||||
salt: Vec::new(),
|
||||
};
|
||||
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
let testkey = kdf.derive_key(pw)?;
|
||||
|
||||
for item in required_array_property(&data, "files")?.iter() {
|
||||
let filename = required_string_property(item, "filename")?.to_owned();
|
||||
let csum = required_string_property(item, "csum")?;
|
||||
let csum = proxmox::tools::hex_to_digest(csum)?;
|
||||
let size = required_integer_property(item, "size")? as u64;
|
||||
manifest.add_file(filename, size, csum)?;
|
||||
}
|
||||
let crypt_config = CryptConfig::new(testkey)?;
|
||||
|
||||
if manifest.files().is_empty() {
|
||||
bail!("manifest does not list any files.");
|
||||
}
|
||||
let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
|
||||
|
||||
Ok(manifest)
|
||||
}).map_err(|err: Error| format_err!("unable to parse backup manifest - {}", err))
|
||||
let mut manifest = BackupManifest::new(snapshot);
|
||||
|
||||
}
|
||||
manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
|
||||
manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
|
||||
|
||||
manifest.unprotected["note"] = "This is not protected by the signature.".into();
|
||||
|
||||
let text = manifest.to_string(Some(&crypt_config))?;
|
||||
|
||||
let manifest: Value = serde_json::from_str(&text)?;
|
||||
let signature = manifest["signature"].as_str().unwrap().to_string();
|
||||
|
||||
assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
|
||||
|
||||
let manifest: BackupManifest = serde_json::from_value(manifest)?;
|
||||
let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
|
||||
|
||||
assert_eq!(signature, expected_signature);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1,38 +1,39 @@
|
||||
use anyhow::{Error};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::datastore::*;
|
||||
use super::crypt_config::*;
|
||||
use super::data_blob::*;
|
||||
use anyhow::Error;
|
||||
|
||||
use super::crypt_config::CryptConfig;
|
||||
use super::data_blob::DataBlob;
|
||||
use super::datastore::DataStore;
|
||||
|
||||
/// The ReadChunk trait allows reading backup data chunks (local or remote)
|
||||
pub trait ReadChunk {
|
||||
/// Returns the encoded chunk data
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error>;
|
||||
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error>;
|
||||
|
||||
/// Returns the decoded chunk data
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error>;
|
||||
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct LocalChunkReader {
|
||||
store: Arc<DataStore>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
}
|
||||
|
||||
impl LocalChunkReader {
|
||||
|
||||
pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
|
||||
Self { store, crypt_config }
|
||||
Self {
|
||||
store,
|
||||
crypt_config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadChunk for LocalChunkReader {
|
||||
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
|
||||
|
||||
let digest_str = proxmox::tools::digest_to_hex(digest);
|
||||
println!("READ CHUNK {}", digest_str);
|
||||
|
||||
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
@ -41,13 +42,59 @@ impl ReadChunk for LocalChunkReader {
|
||||
Ok(chunk)
|
||||
}
|
||||
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
let chunk = self.read_raw_chunk(digest)?;
|
||||
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
Ok(raw_data)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AsyncReadChunk: Send {
|
||||
/// Returns the encoded chunk data
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>>;
|
||||
|
||||
/// Returns the decoded chunk data
|
||||
fn read_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>>;
|
||||
}
|
||||
|
||||
impl AsyncReadChunk for LocalChunkReader {
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
|
||||
Box::pin(async move{
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
|
||||
let raw_data = tokio::fs::read(&path).await?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
Ok(chunk)
|
||||
})
|
||||
}
|
||||
|
||||
fn read_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
|
||||
Box::pin(async move {
|
||||
let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
Ok(raw_data)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
src/backup/verify.rs (new file, 196 lines added)
@@ -0,0 +1,196 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use crate::server::WorkerTask;
|
||||
|
||||
use super::{
|
||||
    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
    FileInfo, ArchiveType, archive_type,
};

fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;

    let csum = openssl::sha::sha256(blob.raw_data());
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    blob.verify_crc()?;

    let magic = blob.magic();

    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
        return Ok(());
    }

    blob.decode(None)?;

    Ok(())
}

fn verify_index_chunks(
    datastore: &DataStore,
    index: Box<dyn IndexFile>,
    worker: &WorkerTask,
) -> Result<(), Error> {

    for pos in 0..index.index_count() {

        worker.fail_on_abort()?;

        let info = index.chunk_info(pos).unwrap();
        let size = info.range.end - info.range.start;
        datastore.verify_stored_chunk(&info.digest, size)?;
    }

    Ok(())
}

fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {

    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_fixed_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = datastore.open_dynamic_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(datastore, Box::new(index), worker)
}

/// Verify a single backup snapshot
///
/// This checks all archives inside a backup snapshot.
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {

    let manifest = match datastore.load_manifest(&backup_dir) {
        Ok((manifest, _crypt_mode, _)) => manifest,
        Err(err) => {
            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));

    let mut error_count = 0;

    for info in manifest.files() {
        let result = proxmox::try_block!({
            worker.log(format!(" check {}", info.filename));
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
            }
        });

        worker.fail_on_abort()?;

        if let Err(err) = result {
            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a backup group
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {

    let mut list = match group.list_backups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
            return Ok(false);
        }
    };

    worker.log(format!("verify group {}:{}", datastore.name(), group));

    let mut error_count = 0;

    BackupInfo::sort_list(&mut list, false); // newest first
    for info in list {
        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}

/// Verify all backups inside a datastore
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {

    let list = match BackupGroup::list_groups(&datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
            return Ok(false);
        }
    };

    worker.log(format!("verify datastore {}", datastore.name()));

    let mut error_count = 0;
    for group in list {
        if !verify_backup_group(datastore, &group, worker)? {
            error_count += 1;
        }
    }

    Ok(error_count == 0)
}
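The doc comments above define a three-way contract: Ok(true) means everything verified, Ok(false) means errors were found (and already logged to the worker log), Err(_) means the task was aborted. A minimal editor's sketch (not part of the diff) of how a caller in this module might consume that contract, using only items already in scope here:

// Editor's sketch: map the tri-state verify result onto a worker task outcome.
fn run_verify_task(datastore: &DataStore, worker: &WorkerTask) -> Result<(), Error> {
    match verify_all_backups(datastore, worker) {
        Ok(true) => Ok(()), // everything verified
        Ok(false) => bail!("verification failed - please check the worker log for details"),
        Err(err) => Err(err), // task was aborted
    }
}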
@@ -14,6 +14,8 @@ use proxmox_backup::config;
use proxmox_backup::buildcfg;

fn main() {
    proxmox_backup::tools::setup_safe_path_env();

    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);

[File diff suppressed because it is too large]
@@ -32,6 +32,24 @@ async fn view_task_result(
    Ok(())
}

// Note: local workers should print logs to stdout, so there is no need
// to fetch/display logs. We just wait for the worker to finish.
pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {

    let upid: proxmox_backup::server::UPID = upid_str.parse()?;

    let sleep_duration = core::time::Duration::new(0, 100_000_000);

    loop {
        if proxmox_backup::server::worker_is_active_local(&upid) {
            tokio::time::delay_for(sleep_duration).await;
        } else {
            break;
        }
    }
    Ok(())
}
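// Editor's sketch (not part of the diff): how callers later in this compare
// view (the disk/zpool/filesystem commands) use wait_for_local_worker. A
// synchronous local API handler spawns a worker task and returns its UPID as
// a JSON string, which is then polled until the worker finishes. `info`,
// `param` and `rpcenv` are whatever the calling command has at hand.
async fn run_local_api_handler(
    info: &'static proxmox::api::ApiMethod,
    param: Value,
    rpcenv: &mut dyn proxmox::api::RpcEnvironment,
) -> Result<(), Error> {
    let result = match info.handler {
        proxmox::api::ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
        _ => unreachable!(),
    };
    // The handler returns the worker's UPID; wait until that worker is done.
    wait_for_local_worker(result.as_str().unwrap()).await?;
    Ok(())
}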
fn connect() -> Result<HttpClient, Error> {

    let uid = nix::unistd::Uid::current();
@@ -301,11 +319,48 @@ async fn pull_datastore(
    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "store": {
                schema: DATASTORE_SCHEMA,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        }
    }
)]
/// Verify backups
async fn verify(
    store: String,
    param: Value,
) -> Result<Value, Error> {

    let output_format = get_output_format(&param);

    let mut client = connect()?;

    let args = json!({});

    let path = format!("api2/json/admin/datastore/{}/verify", store);

    let result = client.post(&path, Some(args)).await?;

    view_task_result(client, result, &output_format).await?;

    Ok(Value::Null)
}

fn main() {

    proxmox_backup::tools::setup_safe_path_env();

    let cmd_def = CliCommandMap::new()
        .insert("acl", acl_commands())
        .insert("datastore", datastore_commands())
        .insert("disk", disk_commands())
        .insert("dns", dns_commands())
        .insert("network", network_commands())
        .insert("user", user_commands())
@@ -321,8 +376,16 @@ fn main() {
                .completion_cb("local-store", config::datastore::complete_datastore_name)
                .completion_cb("remote", config::remote::complete_remote_name)
                .completion_cb("remote-store", complete_remote_datastore_name)
        )
        .insert(
            "verify",
            CliCommand::new(&API_METHOD_VERIFY)
                .arg_param(&["store"])
                .completion_cb("store", config::datastore::complete_datastore_name)
        );


    let mut rpcenv = CliEnvironment::new();
    rpcenv.set_user(Some(String::from("root@pam")));

@@ -12,12 +12,14 @@ use proxmox::api::RpcEnvironmentType;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
use proxmox_backup::tools::daemon;
use proxmox_backup::tools::{daemon, epoch_now, epoch_now_u64};
use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

fn main() {
    proxmox_backup::tools::setup_safe_path_env();

    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
        eprintln!("Error: {}", err);
        std::process::exit(-1);
@@ -134,10 +136,10 @@ fn start_task_scheduler() {
    tokio::spawn(task.map(|_| ()));
}

use std::time:: {Instant, Duration, SystemTime, UNIX_EPOCH};
use std::time:: {Instant, Duration};

fn next_minute() -> Result<Instant, Error> {
    let epoch_now = SystemTime::now().duration_since(UNIX_EPOCH)?;
    let epoch_now = epoch_now()?;
    let epoch_next = Duration::from_secs((epoch_now.as_secs()/60 + 1)*60);
    Ok(Instant::now() + epoch_next - epoch_now)
}
@@ -296,8 +298,9 @@ async fn schedule_datastore_garbage_collection() {
            continue;
        }
    };
    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(epoch_now) => epoch_now.as_secs() as i64,

    let now = match epoch_now_u64() {
        Ok(epoch_now) => epoch_now as i64,
        Err(err) => {
            eprintln!("query system time failed - {}", err);
            continue;
@@ -407,8 +410,8 @@ async fn schedule_datastore_prune() {
        }
    };

    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(epoch_now) => epoch_now.as_secs() as i64,
    let now = match epoch_now_u64() {
        Ok(epoch_now) => epoch_now as i64,
        Err(err) => {
            eprintln!("query system time failed - {}", err);
            continue;
@@ -532,8 +535,8 @@ async fn schedule_datastore_sync_jobs() {
        }
    };

    let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(epoch_now) => epoch_now.as_secs() as i64,
    let now = match epoch_now_u64() {
        Ok(epoch_now) => epoch_now as i64,
        Err(err) => {
            eprintln!("query system time failed - {}", err);
            continue;
@@ -711,11 +714,11 @@ async fn generate_host_stats(save: bool) {
fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, rrd_prefix: &str, save: bool) {

    match proxmox_backup::tools::disks::disk_usage(path) {
        Ok((total, used, _avail)) => {
        Ok(status) => {
            let rrd_key = format!("{}/total", rrd_prefix);
            rrd_update_gauge(&rrd_key, total as f64, save);
            rrd_update_gauge(&rrd_key, status.total as f64, save);
            let rrd_key = format!("{}/used", rrd_prefix);
            rrd_update_gauge(&rrd_key, used as f64, save);
            rrd_update_gauge(&rrd_key, status.used as f64, save);
        }
        Err(err) => {
            eprintln!("read disk_usage on {:?} failed - {}", path, err);
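The hunks above replace direct `SystemTime::now().duration_since(UNIX_EPOCH)` calls with the `epoch_now()` / `epoch_now_u64()` helpers imported from `proxmox_backup::tools`. Their implementation is not part of this compare view; a minimal equivalent, shown only to clarify the refactor, would be:

use std::time::{Duration, SystemTime, UNIX_EPOCH};

use anyhow::Error;

// Sketch of the helpers referenced above (the real ones live in
// proxmox_backup::tools and are not shown in this diff).
pub fn epoch_now() -> Result<Duration, Error> {
    Ok(SystemTime::now().duration_since(UNIX_EPOCH)?)
}

pub fn epoch_now_u64() -> Result<u64, Error> {
    Ok(epoch_now()?.as_secs())
}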
src/bin/proxmox_backup_client/benchmark.rs (new file, 81 lines)
@@ -0,0 +1,81 @@
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::{Error};
use serde_json::Value;
use chrono::{TimeZone, Utc};

use proxmox::api::{ApiMethod, RpcEnvironment};
use proxmox::api::api;

use proxmox_backup::backup::{
    load_and_decrypt_key,
    CryptConfig,
};

use proxmox_backup::client::*;

use crate::{
    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
    extract_repository_from_value,
    record_repository,
    connect,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            keyfile: {
                schema: KEYFILE_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Run benchmark tests
pub async fn benchmark(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let repo = extract_repository_from_value(&param)?;

    let keyfile = param["keyfile"].as_str().map(PathBuf::from);

    let crypt_config = match keyfile {
        None => None,
        Some(path) => {
            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);

    let client = connect(repo.host(), repo.user())?;
    record_repository(&repo);

    let client = BackupWriter::start(
        client,
        crypt_config.clone(),
        repo.store(),
        "host",
        "benshmark",
        backup_time,
        false,
    ).await?;

    println!("Start upload speed test");
    let speed = client.upload_speedtest().await?;

    println!("Upload speed: {} MiB/s", speed);

    Ok(())
}
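The `#[api]` macro generates an `API_METHOD_BENCHMARK` descriptor for the function above. The actual CLI registration happens in the suppressed proxmox-backup-client diff, but following the pattern the other modules in this directory use (see `task_mgmt_cli()` and `catalog_mgmt_cli()` below), it would look roughly like this hypothetical wiring, not taken from the diff:

use proxmox::api::cli::CliCommand;

// Hypothetical: expose `benchmark` as a client subcommand.
pub fn benchmark_cmd_def() -> CliCommand {
    CliCommand::new(&API_METHOD_BENCHMARK)
        .completion_cb("repository", crate::complete_repository)
        .completion_cb("keyfile", proxmox_backup::tools::complete_file_name)
}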
src/bin/proxmox_backup_client/catalog.rs (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::{api, cli::*};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
|
||||
use crate::{
|
||||
REPO_URL_SCHEMA,
|
||||
extract_repository_from_value,
|
||||
record_repository,
|
||||
api_datastore_latest_snapshot,
|
||||
complete_repository,
|
||||
complete_backup_snapshot,
|
||||
complete_group_or_snapshot,
|
||||
complete_pxar_archive_name,
|
||||
connect,
|
||||
BackupDir,
|
||||
BackupGroup,
|
||||
BufferedDynamicReader,
|
||||
BufferedDynamicReadAt,
|
||||
CatalogReader,
|
||||
CATALOG_NAME,
|
||||
CryptConfig,
|
||||
DynamicIndexReader,
|
||||
IndexFile,
|
||||
Shell,
|
||||
};
|
||||
|
||||
use proxmox_backup::backup::load_and_decrypt_key;
|
||||
|
||||
use crate::key::get_encryption_key_password;
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
snapshot: {
|
||||
type: String,
|
||||
description: "Snapshot path.",
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Dump catalog.
|
||||
async fn dump_catalog(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
|
||||
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
|
||||
|
||||
let crypt_config = match keyfile {
|
||||
None => None,
|
||||
Some(path) => {
|
||||
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
|
||||
Some(Arc::new(CryptConfig::new(key)?))
|
||||
}
|
||||
};
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
|
||||
let client = BackupReader::start(
|
||||
client,
|
||||
crypt_config.clone(),
|
||||
repo.store(),
|
||||
&snapshot.group().backup_type(),
|
||||
&snapshot.group().backup_id(),
|
||||
snapshot.backup_time(),
|
||||
true,
|
||||
).await?;
|
||||
|
||||
let (manifest, _) = client.download_manifest().await?;
|
||||
|
||||
let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
|
||||
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
|
||||
|
||||
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
|
||||
let mut catalogfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
std::io::copy(&mut reader, &mut catalogfile)
|
||||
.map_err(|err| format_err!("unable to download catalog - {}", err))?;
|
||||
|
||||
catalogfile.seek(SeekFrom::Start(0))?;
|
||||
|
||||
let mut catalog_reader = CatalogReader::new(catalogfile);
|
||||
|
||||
catalog_reader.dump()?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"snapshot": {
|
||||
type: String,
|
||||
description: "Group/Snapshot path.",
|
||||
},
|
||||
"archive-name": {
|
||||
type: String,
|
||||
description: "Backup archive name.",
|
||||
},
|
||||
"repository": {
|
||||
optional: true,
|
||||
schema: REPO_URL_SCHEMA,
|
||||
},
|
||||
"keyfile": {
|
||||
optional: true,
|
||||
type: String,
|
||||
description: "Path to encryption key.",
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Shell to interactively inspect and restore snapshots.
|
||||
async fn catalog_shell(param: Value) -> Result<(), Error> {
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?;
|
||||
|
||||
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
|
||||
let group: BackupGroup = path.parse()?;
|
||||
api_datastore_latest_snapshot(&client, repo.store(), group).await?
|
||||
} else {
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
|
||||
};
|
||||
|
||||
let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
|
||||
let crypt_config = match keyfile {
|
||||
None => None,
|
||||
Some(path) => {
|
||||
let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
|
||||
Some(Arc::new(CryptConfig::new(key)?))
|
||||
}
|
||||
};
|
||||
|
||||
let server_archive_name = if archive_name.ends_with(".pxar") {
|
||||
format!("{}.didx", archive_name)
|
||||
} else {
|
||||
bail!("Can only mount pxar archives.");
|
||||
};
|
||||
|
||||
let client = BackupReader::start(
|
||||
client,
|
||||
crypt_config.clone(),
|
||||
repo.store(),
|
||||
&backup_type,
|
||||
&backup_id,
|
||||
backup_time,
|
||||
true,
|
||||
).await?;
|
||||
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let (manifest, _) = client.download_manifest().await?;
|
||||
|
||||
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
|
||||
client.download(CATALOG_NAME, &mut tmpfile).await?;
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read catalog index - {}", err))?;
|
||||
|
||||
// Note: do not use values stored in index (not trusted) - instead, computed them again
|
||||
let (csum, size) = index.compute_csum();
|
||||
manifest.verify_file(CATALOG_NAME, &csum, size)?;
|
||||
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
|
||||
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let mut catalogfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
std::io::copy(&mut reader, &mut catalogfile)
|
||||
.map_err(|err| format_err!("unable to download catalog - {}", err))?;
|
||||
|
||||
catalogfile.seek(SeekFrom::Start(0))?;
|
||||
let catalog_reader = CatalogReader::new(catalogfile);
|
||||
let state = Shell::new(
|
||||
catalog_reader,
|
||||
&server_archive_name,
|
||||
decoder,
|
||||
).await?;
|
||||
|
||||
println!("Starting interactive shell");
|
||||
state.shell().await?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn catalog_mgmt_cli() -> CliCommandMap {
|
||||
let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
|
||||
.arg_param(&["snapshot", "archive-name"])
|
||||
.completion_cb("repository", complete_repository)
|
||||
.completion_cb("archive-name", complete_pxar_archive_name)
|
||||
.completion_cb("snapshot", complete_group_or_snapshot);
|
||||
|
||||
let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
|
||||
.arg_param(&["snapshot"])
|
||||
.completion_cb("repository", complete_repository)
|
||||
.completion_cb("snapshot", complete_backup_snapshot);
|
||||
|
||||
CliCommandMap::new()
|
||||
.insert("dump", catalog_dump_cmd_def)
|
||||
.insert("shell", catalog_shell_cmd_def)
|
||||
}
|
src/bin/proxmox_backup_client/key.rs (new file, 274 lines)
@@ -0,0 +1,274 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{Local, TimeZone};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox::api::api;
|
||||
use proxmox::api::cli::{CliCommand, CliCommandMap};
|
||||
use proxmox::sys::linux::tty;
|
||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||
|
||||
use proxmox_backup::backup::{
|
||||
encrypt_key_with_passphrase, load_and_decrypt_key, store_key_config, KeyConfig,
|
||||
};
|
||||
use proxmox_backup::tools;
|
||||
|
||||
pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
|
||||
pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";
|
||||
|
||||
pub fn find_master_pubkey() -> Result<Option<PathBuf>, Error> {
|
||||
super::find_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
|
||||
}
|
||||
|
||||
pub fn place_master_pubkey() -> Result<PathBuf, Error> {
|
||||
super::place_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
|
||||
}
|
||||
|
||||
pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
|
||||
super::find_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
|
||||
}
|
||||
|
||||
pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
|
||||
super::place_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
|
||||
}
|
||||
|
||||
pub fn read_optional_default_encryption_key() -> Result<Option<Vec<u8>>, Error> {
|
||||
find_default_encryption_key()?
|
||||
.map(file_get_contents)
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
||||
// fixme: implement other input methods
|
||||
|
||||
use std::env::VarError::*;
|
||||
match std::env::var("PBS_ENCRYPTION_PASSWORD") {
|
||||
Ok(p) => return Ok(p.as_bytes().to_vec()),
|
||||
Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
|
||||
Err(NotPresent) => {
|
||||
// Try another method
|
||||
}
|
||||
}
|
||||
|
||||
// If we're on a TTY, query the user for a password
|
||||
if tty::stdin_isatty() {
|
||||
return Ok(tty::read_password("Encryption Key Password: ")?);
|
||||
}
|
||||
|
||||
bail!("no password input mechanism available");
|
||||
}
|
||||
|
||||
#[api(
|
||||
default: "scrypt",
|
||||
)]
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Key derivation function for password protected encryption keys.
|
||||
pub enum Kdf {
|
||||
/// Do not encrypt the key.
|
||||
None,
|
||||
|
||||
/// Encrypt they key with a password using SCrypt.
|
||||
Scrypt,
|
||||
}
|
||||
|
||||
impl Default for Kdf {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Kdf::Scrypt
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
kdf: {
|
||||
type: Kdf,
|
||||
optional: true,
|
||||
},
|
||||
path: {
|
||||
description:
|
||||
"Output file. Without this the key will become the new default encryption key.",
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Create a new encryption key.
|
||||
fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
|
||||
let path = match path {
|
||||
Some(path) => PathBuf::from(path),
|
||||
None => place_default_encryption_key()?,
|
||||
};
|
||||
|
||||
let kdf = kdf.unwrap_or_default();
|
||||
|
||||
let key = proxmox::sys::linux::random_data(32)?;
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let created = Local.timestamp(Local::now().timestamp(), 0);
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
false,
|
||||
KeyConfig {
|
||||
kdf: None,
|
||||
created,
|
||||
modified: created,
|
||||
data: key,
|
||||
},
|
||||
)?;
|
||||
}
|
||||
Kdf::Scrypt => {
|
||||
// always read passphrase from tty
|
||||
if !tty::stdin_isatty() {
|
||||
bail!("unable to read passphrase - no tty");
|
||||
}
|
||||
|
||||
let password = tty::read_and_verify_password("Encryption Key Password: ")?;
|
||||
|
||||
let key_config = encrypt_key_with_passphrase(&key, &password)?;
|
||||
|
||||
store_key_config(&path, false, key_config)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
kdf: {
|
||||
type: Kdf,
|
||||
optional: true,
|
||||
},
|
||||
path: {
|
||||
description: "Key file. Without this the default key's password will be changed.",
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Change the encryption key's password.
|
||||
fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
|
||||
let path = match path {
|
||||
Some(path) => PathBuf::from(path),
|
||||
None => find_default_encryption_key()?
|
||||
.ok_or_else(|| format_err!("no encryption file provided and no default file found"))?,
|
||||
};
|
||||
|
||||
let kdf = kdf.unwrap_or_default();
|
||||
|
||||
if !tty::stdin_isatty() {
|
||||
bail!("unable to change passphrase - no tty");
|
||||
}
|
||||
|
||||
let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
|
||||
|
||||
match kdf {
|
||||
Kdf::None => {
|
||||
let modified = Local.timestamp(Local::now().timestamp(), 0);
|
||||
|
||||
store_key_config(
|
||||
&path,
|
||||
true,
|
||||
KeyConfig {
|
||||
kdf: None,
|
||||
created, // keep original value
|
||||
modified,
|
||||
data: key.to_vec(),
|
||||
},
|
||||
)?;
|
||||
}
|
||||
Kdf::Scrypt => {
|
||||
let password = tty::read_and_verify_password("New Password: ")?;
|
||||
|
||||
let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
|
||||
new_key_config.created = created; // keep original value
|
||||
|
||||
store_key_config(&path, true, new_key_config)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
path: {
|
||||
description: "Path to the PEM formatted RSA public key.",
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Import an RSA public key used to put an encrypted version of the symmetric backup encryption
|
||||
/// key onto the backup server along with each backup.
|
||||
fn import_master_pubkey(path: String) -> Result<(), Error> {
|
||||
let pem_data = file_get_contents(&path)?;
|
||||
|
||||
if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
|
||||
bail!("Unable to decode PEM data - {}", err);
|
||||
}
|
||||
|
||||
let target_path = place_master_pubkey()?;
|
||||
|
||||
replace_file(&target_path, &pem_data, CreateOptions::new())?;
|
||||
|
||||
println!("Imported public master key to {:?}", target_path);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api]
|
||||
/// Create an RSA public/private key pair used to put an encrypted version of the symmetric backup
|
||||
/// encryption key onto the backup server along with each backup.
|
||||
fn create_master_key() -> Result<(), Error> {
|
||||
// we need a TTY to query the new password
|
||||
if !tty::stdin_isatty() {
|
||||
bail!("unable to create master key - no tty");
|
||||
}
|
||||
|
||||
let rsa = openssl::rsa::Rsa::generate(4096)?;
|
||||
let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
|
||||
|
||||
let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;
|
||||
|
||||
let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
|
||||
let filename_pub = "master-public.pem";
|
||||
println!("Writing public master key to {}", filename_pub);
|
||||
replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
|
||||
|
||||
let cipher = openssl::symm::Cipher::aes_256_cbc();
|
||||
let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
|
||||
|
||||
let filename_priv = "master-private.pem";
|
||||
println!("Writing private master key to {}", filename_priv);
|
||||
replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn cli() -> CliCommandMap {
|
||||
let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
|
||||
.arg_param(&["path"])
|
||||
.completion_cb("path", tools::complete_file_name);
|
||||
|
||||
let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
|
||||
.arg_param(&["path"])
|
||||
.completion_cb("path", tools::complete_file_name);
|
||||
|
||||
let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
|
||||
let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
|
||||
.arg_param(&["path"])
|
||||
.completion_cb("path", tools::complete_file_name);
|
||||
|
||||
CliCommandMap::new()
|
||||
.insert("create", key_create_cmd_def)
|
||||
.insert("create-master-key", key_create_master_key_cmd_def)
|
||||
.insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
|
||||
.insert("change-passphrase", key_change_passphrase_cmd_def)
|
||||
}
|
src/bin/proxmox_backup_client/mod.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
use anyhow::{Context, Error};

mod benchmark;
pub use benchmark::*;
mod mount;
pub use mount::*;
mod task;
pub use task::*;
mod catalog;
pub use catalog::*;

pub mod key;

pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
}

/// Convenience helper for better error messages:
pub fn find_xdg_file(
    file_name: impl AsRef<std::path::Path>,
    description: &'static str,
) -> Result<Option<std::path::PathBuf>, Error> {
    let file_name = file_name.as_ref();
    base_directories()
        .map(|base| base.find_config_file(file_name))
        .with_context(|| format!("error searching for {}", description))
}

pub fn place_xdg_file(
    file_name: impl AsRef<std::path::Path>,
    description: &'static str,
) -> Result<std::path::PathBuf, Error> {
    let file_name = file_name.as_ref();
    base_directories()
        .and_then(|base| {
            base.place_config_file(file_name).map_err(Error::from)
        })
        .with_context(|| format!("failed to place {} in xdg home", description))
}
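The key module earlier in this diff uses these helpers to resolve its `encryption-key.json` and `master-public.pem` files. As a usage sketch (with a made-up file name, not taken from the diff):

// Usage sketch: resolve an optional per-user config file, or compute the
// path a new one should be written to. "example.json" is a placeholder.
pub fn find_example_file() -> Result<Option<std::path::PathBuf>, Error> {
    find_xdg_file("example.json", "example config file")
}

pub fn place_example_file() -> Result<std::path::PathBuf, Error> {
    place_xdg_file("example.json", "example config file")
}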
src/bin/proxmox_backup_client/mount.rs (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::Path;
|
||||
use std::ffi::OsStr;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde_json::Value;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
use nix::unistd::{fork, ForkResult, pipe};
|
||||
use futures::select;
|
||||
use futures::future::FutureExt;
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*};
|
||||
|
||||
|
||||
use proxmox_backup::tools;
|
||||
use proxmox_backup::backup::{
|
||||
load_and_decrypt_key,
|
||||
CryptConfig,
|
||||
IndexFile,
|
||||
BackupDir,
|
||||
BackupGroup,
|
||||
BufferedDynamicReader,
|
||||
};
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
|
||||
use crate::{
|
||||
REPO_URL_SCHEMA,
|
||||
extract_repository_from_value,
|
||||
complete_pxar_archive_name,
|
||||
complete_group_or_snapshot,
|
||||
complete_repository,
|
||||
record_repository,
|
||||
connect,
|
||||
api_datastore_latest_snapshot,
|
||||
BufferedDynamicReadAt,
|
||||
};
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&mount),
|
||||
&ObjectSchema::new(
|
||||
"Mount pxar archive.",
|
||||
&sorted!([
|
||||
("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
|
||||
("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
|
||||
("target", false, &StringSchema::new("Target directory path.").schema()),
|
||||
("repository", true, &REPO_URL_SCHEMA),
|
||||
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
|
||||
("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
pub fn mount_cmd_def() -> CliCommand {
|
||||
|
||||
CliCommand::new(&API_METHOD_MOUNT)
|
||||
.arg_param(&["snapshot", "archive-name", "target"])
|
||||
.completion_cb("repository", complete_repository)
|
||||
.completion_cb("snapshot", complete_group_or_snapshot)
|
||||
.completion_cb("archive-name", complete_pxar_archive_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
}
|
||||
|
||||
fn mount(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
if verbose {
|
||||
// This will stay in foreground with debug output enabled as None is
|
||||
// passed for the RawFd.
|
||||
return proxmox_backup::tools::runtime::main(mount_do(param, None));
|
||||
}
|
||||
|
||||
// Process should be deamonized.
|
||||
// Make sure to fork before the async runtime is instantiated to avoid troubles.
|
||||
let pipe = pipe()?;
|
||||
match fork() {
|
||||
Ok(ForkResult::Parent { .. }) => {
|
||||
nix::unistd::close(pipe.1).unwrap();
|
||||
// Blocks the parent process until we are ready to go in the child
|
||||
let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
|
||||
Ok(Value::Null)
|
||||
}
|
||||
Ok(ForkResult::Child) => {
|
||||
nix::unistd::close(pipe.0).unwrap();
|
||||
nix::unistd::setsid().unwrap();
|
||||
proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
|
||||
}
|
||||
Err(_) => bail!("failed to daemonize process"),
|
||||
}
|
||||
}
|
||||
|
||||
async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let archive_name = tools::required_string_param(¶m, "archive-name")?;
|
||||
let target = tools::required_string_param(¶m, "target")?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
|
||||
record_repository(&repo);
|
||||
|
||||
let path = tools::required_string_param(¶m, "snapshot")?;
|
||||
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
|
||||
let group: BackupGroup = path.parse()?;
|
||||
api_datastore_latest_snapshot(&client, repo.store(), group).await?
|
||||
} else {
|
||||
let snapshot: BackupDir = path.parse()?;
|
||||
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
|
||||
};
|
||||
|
||||
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
|
||||
let crypt_config = match keyfile {
|
||||
None => None,
|
||||
Some(path) => {
|
||||
let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
|
||||
Some(Arc::new(CryptConfig::new(key)?))
|
||||
}
|
||||
};
|
||||
|
||||
let server_archive_name = if archive_name.ends_with(".pxar") {
|
||||
format!("{}.didx", archive_name)
|
||||
} else {
|
||||
bail!("Can only mount pxar archives.");
|
||||
};
|
||||
|
||||
let client = BackupReader::start(
|
||||
client,
|
||||
crypt_config.clone(),
|
||||
repo.store(),
|
||||
&backup_type,
|
||||
&backup_id,
|
||||
backup_time,
|
||||
true,
|
||||
).await?;
|
||||
|
||||
let (manifest, _) = client.download_manifest().await?;
|
||||
|
||||
if server_archive_name.ends_with(".didx") {
|
||||
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
|
||||
let most_used = index.find_most_used_chunks(8);
|
||||
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
|
||||
let reader = BufferedDynamicReader::new(index, chunk_reader);
|
||||
let archive_size = reader.archive_size();
|
||||
let reader: proxmox_backup::pxar::fuse::Reader =
|
||||
Arc::new(BufferedDynamicReadAt::new(reader));
|
||||
let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;
|
||||
let options = OsStr::new("ro,default_permissions");
|
||||
|
||||
let session = proxmox_backup::pxar::fuse::Session::mount(
|
||||
decoder,
|
||||
&options,
|
||||
false,
|
||||
Path::new(target),
|
||||
)
|
||||
.map_err(|err| format_err!("pxar mount failed: {}", err))?;
|
||||
|
||||
if let Some(pipe) = pipe {
|
||||
nix::unistd::chdir(Path::new("/")).unwrap();
|
||||
// Finish creation of daemon by redirecting filedescriptors.
|
||||
let nullfd = nix::fcntl::open(
|
||||
"/dev/null",
|
||||
nix::fcntl::OFlag::O_RDWR,
|
||||
nix::sys::stat::Mode::empty(),
|
||||
).unwrap();
|
||||
nix::unistd::dup2(nullfd, 0).unwrap();
|
||||
nix::unistd::dup2(nullfd, 1).unwrap();
|
||||
nix::unistd::dup2(nullfd, 2).unwrap();
|
||||
if nullfd > 2 {
|
||||
nix::unistd::close(nullfd).unwrap();
|
||||
}
|
||||
// Signal the parent process that we are done with the setup and it can
|
||||
// terminate.
|
||||
nix::unistd::write(pipe, &[0u8])?;
|
||||
nix::unistd::close(pipe).unwrap();
|
||||
}
|
||||
|
||||
let mut interrupt = signal(SignalKind::interrupt())?;
|
||||
select! {
|
||||
res = session.fuse() => res?,
|
||||
_ = interrupt.recv().fuse() => {
|
||||
// exit on interrupted
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bail!("unknown archive file extension (expected .pxar)");
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
src/bin/proxmox_backup_client/task.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
|
||||
use anyhow::{Error};
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use proxmox::api::{api, cli::*};
|
||||
|
||||
use proxmox_backup::tools;
|
||||
|
||||
use proxmox_backup::client::*;
|
||||
use proxmox_backup::api2::types::UPID_SCHEMA;
|
||||
|
||||
use crate::{
|
||||
REPO_URL_SCHEMA,
|
||||
extract_repository_from_value,
|
||||
complete_repository,
|
||||
connect,
|
||||
};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
limit: {
|
||||
description: "The maximal number of tasks to list.",
|
||||
type: Integer,
|
||||
optional: true,
|
||||
minimum: 1,
|
||||
maximum: 1000,
|
||||
default: 50,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
all: {
|
||||
type: Boolean,
|
||||
description: "Also list stopped tasks.",
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List running server tasks for this repo user
|
||||
async fn task_list(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
|
||||
let limit = param["limit"].as_u64().unwrap_or(50) as usize;
|
||||
let running = !param["all"].as_bool().unwrap_or(false);
|
||||
|
||||
let args = json!({
|
||||
"running": running,
|
||||
"start": 0,
|
||||
"limit": limit,
|
||||
"userfilter": repo.user(),
|
||||
"store": repo.store(),
|
||||
});
|
||||
|
||||
let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
|
||||
let mut data = result["data"].take();
|
||||
|
||||
let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
|
||||
.column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
|
||||
.column(ColumnConfig::new("upid"))
|
||||
.column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
|
||||
|
||||
format_and_print_result_full(&mut data, schema, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
upid: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Display the task log.
|
||||
async fn task_log(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let upid = tools::required_string_param(¶m, "upid")?;
|
||||
|
||||
let client = connect(repo.host(), repo.user())?;
|
||||
|
||||
display_task_log(client, upid, true).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
upid: {
|
||||
schema: UPID_SCHEMA,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Try to stop a specific task.
|
||||
async fn task_stop(param: Value) -> Result<Value, Error> {
|
||||
|
||||
let repo = extract_repository_from_value(¶m)?;
|
||||
let upid_str = tools::required_string_param(¶m, "upid")?;
|
||||
|
||||
let mut client = connect(repo.host(), repo.user())?;
|
||||
|
||||
let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
|
||||
let _ = client.delete(&path, None).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
pub fn task_mgmt_cli() -> CliCommandMap {
|
||||
|
||||
let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
|
||||
.completion_cb("repository", complete_repository);
|
||||
|
||||
let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
|
||||
.arg_param(&["upid"]);
|
||||
|
||||
let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
|
||||
.arg_param(&["upid"]);
|
||||
|
||||
CliCommandMap::new()
|
||||
.insert("log", task_log_cmd_def)
|
||||
.insert("list", task_list_cmd_def)
|
||||
.insert("stop", task_stop_cmd_def)
|
||||
}
|
@@ -86,7 +86,7 @@ pub fn datastore_commands() -> CommandLineInterface {
                .completion_cb("name", config::datastore::complete_datastore_name)
                .completion_cb("gc-schedule", config::datastore::complete_calendar_event)
                .completion_cb("prune-schedule", config::datastore::complete_calendar_event)
        )
    )
    .insert("remove",
        CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
            .arg_param(&["name"])

src/bin/proxmox_backup_manager/disk.rs (new file, 353 lines)
@@ -0,0 +1,353 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox::api::{api, cli::*, RpcEnvironment, ApiHandler};
|
||||
|
||||
use proxmox_backup::tools::disks::{
|
||||
FileSystemType,
|
||||
SmartAttribute,
|
||||
complete_disk_name,
|
||||
};
|
||||
|
||||
use proxmox_backup::api2::node::disks::{
|
||||
zfs::DISK_LIST_SCHEMA,
|
||||
zfs::ZFS_ASHIFT_SCHEMA,
|
||||
zfs::ZfsRaidLevel,
|
||||
zfs::ZfsCompressionType,
|
||||
};
|
||||
|
||||
use proxmox_backup::api2::{self, types::* };
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Local disk list.
|
||||
fn list_disks(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_LIST_DISKS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let render_wearout = |value: &Value, _record: &Value| -> Result<String, Error> {
|
||||
match value.as_f64() {
|
||||
Some(value) => Ok(format!("{:.2} %", if value <= 100.0 { 100.0 - value } else { 0.0 })),
|
||||
None => Ok(String::from("-")),
|
||||
}
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("name"))
|
||||
.column(ColumnConfig::new("used"))
|
||||
.column(ColumnConfig::new("gpt"))
|
||||
.column(ColumnConfig::new("disk-type"))
|
||||
.column(ColumnConfig::new("size"))
|
||||
.column(ColumnConfig::new("model"))
|
||||
.column(ColumnConfig::new("wearout").renderer(render_wearout))
|
||||
.column(ColumnConfig::new("status"))
|
||||
;
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
returns: {
|
||||
description: "SMART attributes.",
|
||||
type: Array,
|
||||
items: {
|
||||
type: SmartAttribute,
|
||||
},
|
||||
}
|
||||
)]
|
||||
/// Show SMART attributes.
|
||||
fn smart_attributes(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_SMART_STATUS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut data = data["attributes"].take();
|
||||
|
||||
let options = default_table_format_options();
|
||||
format_and_print_result_full(&mut data, API_METHOD_SMART_ATTRIBUTES.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
uuid: {
|
||||
description: "UUID for the GPT table.",
|
||||
type: String,
|
||||
optional: true,
|
||||
max_length: 36,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Initialize empty Disk with GPT
|
||||
async fn initialize_disk(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::API_METHOD_INITIALIZE_DISK;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
devices: {
|
||||
schema: DISK_LIST_SCHEMA,
|
||||
},
|
||||
raidlevel: {
|
||||
type: ZfsRaidLevel,
|
||||
},
|
||||
ashift: {
|
||||
schema: ZFS_ASHIFT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
compression: {
|
||||
type: ZfsCompressionType,
|
||||
optional: true,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the zpool.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// create a zfs pool
|
||||
async fn create_zpool(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::zfs::API_METHOD_CREATE_ZPOOL;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Local zfs pools.
|
||||
fn list_zpools(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::zfs::API_METHOD_LIST_ZPOOLS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let render_usage = |value: &Value, record: &Value| -> Result<String, Error> {
|
||||
let value = value.as_u64().unwrap_or(0);
|
||||
let size = match record["size"].as_u64() {
|
||||
Some(size) => size,
|
||||
None => bail!("missing size property"),
|
||||
};
|
||||
if size == 0 {
|
||||
bail!("got zero size");
|
||||
}
|
||||
Ok(format!("{:.2} %", (value as f64)/(size as f64)))
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("name"))
|
||||
.column(ColumnConfig::new("size"))
|
||||
.column(ColumnConfig::new("alloc").right_align(true).renderer(render_usage))
|
||||
.column(ColumnConfig::new("health"));
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
pub fn zpool_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_ZPOOLS))
|
||||
.insert("create",
|
||||
CliCommand::new(&API_METHOD_CREATE_ZPOOL)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("devices", complete_disk_name) // fixme: comlete the list
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// List systemd datastore mount units.
|
||||
fn list_datastore_mounts(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
|
||||
|
||||
let output_format = get_output_format(¶m);
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::directory::API_METHOD_LIST_DATASTORE_MOUNTS;
|
||||
let mut data = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let options = default_table_format_options()
|
||||
.column(ColumnConfig::new("path"))
|
||||
.column(ColumnConfig::new("device"))
|
||||
.column(ColumnConfig::new("filesystem"))
|
||||
.column(ColumnConfig::new("options"));
|
||||
|
||||
format_and_print_result_full(&mut data, info.returns, &output_format, &options);
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
name: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
disk: {
|
||||
schema: BLOCKDEVICE_NAME_SCHEMA,
|
||||
},
|
||||
"add-datastore": {
|
||||
description: "Configure a datastore using the directory.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
filesystem: {
|
||||
type: FileSystemType,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.
|
||||
async fn create_datastore_disk(
|
||||
mut param: Value,
|
||||
rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
param["node"] = "localhost".into();
|
||||
|
||||
let info = &api2::node::disks::directory::API_METHOD_CREATE_DATASTORE_DISK;
|
||||
let result = match info.handler {
|
||||
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
crate::wait_for_local_worker(result.as_str().unwrap()).await?;
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
pub fn filesystem_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORE_MOUNTS))
|
||||
.insert("create",
|
||||
CliCommand::new(&API_METHOD_CREATE_DATASTORE_DISK)
|
||||
.arg_param(&["name"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
||||
|
||||
pub fn disk_commands() -> CommandLineInterface {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("list", CliCommand::new(&API_METHOD_LIST_DISKS))
|
||||
.insert("smart-attributes",
|
||||
CliCommand::new(&API_METHOD_SMART_ATTRIBUTES)
|
||||
.arg_param(&["disk"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
)
|
||||
.insert("fs", filesystem_commands())
|
||||
.insert("zpool", zpool_commands())
|
||||
.insert("initialize",
|
||||
CliCommand::new(&API_METHOD_INITIALIZE_DISK)
|
||||
.arg_param(&["disk"])
|
||||
.completion_cb("disk", complete_disk_name)
|
||||
);
|
||||
|
||||
cmd_def.into()
|
||||
}
|
@@ -14,3 +14,5 @@ mod sync;
pub use sync::*;
mod user;
pub use user::*;
mod disk;
pub use disk::*;
src/bin/pxar.rs (800 lines)
@@ -1,191 +1,305 @@
|
||||
extern crate proxmox_backup;
|
||||
use std::collections::HashSet;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::OpenOptions;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::future::FutureExt;
|
||||
use futures::select;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
|
||||
use proxmox::{sortable, identity};
|
||||
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
|
||||
use proxmox::api::schema::*;
|
||||
use proxmox::api::cli::*;
|
||||
use proxmox::api::api;
|
||||
|
||||
use proxmox_backup::tools;
|
||||
|
||||
use serde_json::{Value};
|
||||
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::fs::OpenOptions;
|
||||
use std::ffi::OsStr;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::collections::HashSet;
|
||||
|
||||
use proxmox_backup::pxar;
|
||||
|
||||
fn dump_archive_from_reader<R: std::io::Read>(
|
||||
reader: &mut R,
|
||||
feature_flags: u64,
|
||||
verbose: bool,
|
||||
) -> Result<(), Error> {
|
||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
||||
|
||||
let stdout = std::io::stdout();
|
||||
let mut out = stdout.lock();
|
||||
|
||||
let mut path = PathBuf::new();
|
||||
decoder.dump_entry(&mut path, verbose, &mut out)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn dump_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let archive = tools::required_string_param(¶m, "archive")?;
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
|
||||
let feature_flags = pxar::flags::DEFAULT;
|
||||
|
||||
if archive == "-" {
|
||||
let stdin = std::io::stdin();
|
||||
let mut reader = stdin.lock();
|
||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
||||
} else {
|
||||
if verbose { println!("PXAR dump: {}", archive); }
|
||||
let file = std::fs::File::open(archive)?;
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
dump_archive_from_reader(&mut reader, feature_flags, verbose)?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
use proxmox_backup::pxar::{fuse, format_single_line_entry, ENCODER_MAX_ENTRIES, Flags};
|
||||
|
||||
fn extract_archive_from_reader<R: std::io::Read>(
|
||||
reader: &mut R,
|
||||
target: &str,
|
||||
feature_flags: u64,
|
||||
feature_flags: Flags,
|
||||
allow_existing_dirs: bool,
|
||||
verbose: bool,
|
||||
pattern: Option<Vec<pxar::MatchPattern>>
|
||||
match_list: &[MatchEntry],
|
||||
) -> Result<(), Error> {
|
||||
let mut decoder = pxar::SequentialDecoder::new(reader, feature_flags);
|
||||
decoder.set_callback(move |path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
decoder.set_allow_existing_dirs(allow_existing_dirs);
|
||||
|
||||
let pattern = pattern.unwrap_or_else(Vec::new);
|
||||
decoder.restore(Path::new(target), &pattern)?;
|
||||
|
||||
Ok(())
|
||||
proxmox_backup::pxar::extract_archive(
|
||||
pxar::decoder::Decoder::from_std(reader)?,
|
||||
Path::new(target),
|
||||
&match_list,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
|path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
pattern: {
|
||||
description: "List of paths or pattern matching files to restore",
|
||||
type: Array,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path or pattern matching files to restore.",
|
||||
},
|
||||
optional: true,
|
||||
},
|
||||
target: {
|
||||
description: "Target directory",
|
||||
optional: true,
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-xattrs": {
|
||||
description: "Ignore extended file attributes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fcaps": {
|
||||
description: "Ignore file capabilities.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-acls": {
|
||||
description: "Ignore access control list entries.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"allow-existing-dirs": {
|
||||
description: "Allows directories to already exist on restore.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"files-from": {
|
||||
description: "File containing match pattern for files to restore.",
|
||||
optional: true,
|
||||
},
|
||||
"no-device-nodes": {
|
||||
description: "Ignore device nodes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fifos": {
|
||||
description: "Ignore fifos.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-sockets": {
|
||||
description: "Ignore sockets.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Extract an archive.
|
||||
fn extract_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
|
||||
let archive = tools::required_string_param(&param, "archive")?;
|
||||
let target = param["target"].as_str().unwrap_or(".");
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
||||
let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
|
||||
let files_from = param["files-from"].as_str();
|
||||
let empty = Vec::new();
|
||||
let arg_pattern = param["pattern"].as_array().unwrap_or(&empty);
|
||||
|
||||
let mut feature_flags = pxar::flags::DEFAULT;
|
||||
archive: String,
|
||||
pattern: Option<Vec<String>>,
|
||||
target: Option<String>,
|
||||
verbose: bool,
|
||||
no_xattrs: bool,
|
||||
no_fcaps: bool,
|
||||
no_acls: bool,
|
||||
allow_existing_dirs: bool,
|
||||
files_from: Option<String>,
|
||||
no_device_nodes: bool,
|
||||
no_fifos: bool,
|
||||
no_sockets: bool,
|
||||
) -> Result<(), Error> {
    let mut feature_flags = Flags::DEFAULT;
    if no_xattrs {
        feature_flags ^= pxar::flags::WITH_XATTRS;
        feature_flags ^= Flags::WITH_XATTRS;
    }
    if no_fcaps {
        feature_flags ^= pxar::flags::WITH_FCAPS;
        feature_flags ^= Flags::WITH_FCAPS;
    }
    if no_acls {
        feature_flags ^= pxar::flags::WITH_ACL;
        feature_flags ^= Flags::WITH_ACL;
    }
    if no_device_nodes {
        feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
        feature_flags ^= Flags::WITH_DEVICE_NODES;
    }
    if no_fifos {
        feature_flags ^= pxar::flags::WITH_FIFOS;
        feature_flags ^= Flags::WITH_FIFOS;
    }
    if no_sockets {
        feature_flags ^= pxar::flags::WITH_SOCKETS;
        feature_flags ^= Flags::WITH_SOCKETS;
    }
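The `^=` lines above toggle bits out of `Flags::DEFAULT`: every `WITH_*` bit is set by default, so XOR-ing it once clears it. A minimal, self-contained sketch of that toggle pattern, using plain `u64` constants as hypothetical stand-ins for the real `Flags` type:

```rust
// Hypothetical stand-ins for the real feature-flag constants (illustration only).
const WITH_XATTRS: u64 = 1 << 0;
const WITH_FCAPS: u64 = 1 << 1;
const WITH_ACL: u64 = 1 << 2;
const DEFAULT: u64 = WITH_XATTRS | WITH_FCAPS | WITH_ACL;

fn main() {
    let no_xattrs = true;
    let mut feature_flags = DEFAULT;
    if no_xattrs {
        // XOR clears the bit because it is known to be set in DEFAULT.
        feature_flags ^= WITH_XATTRS;
    }
    assert_eq!(feature_flags & WITH_XATTRS, 0);
    assert_ne!(feature_flags & WITH_ACL, 0);
    println!("feature flags: {:#b}", feature_flags);
}
```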
|
||||
|
||||
let mut pattern_list = Vec::new();
|
||||
if let Some(filename) = files_from {
|
||||
let dir = nix::dir::Dir::open("./", nix::fcntl::OFlag::O_RDONLY, nix::sys::stat::Mode::empty())?;
|
||||
if let Some((mut pattern, _, _)) = pxar::MatchPattern::from_file(dir.as_raw_fd(), filename)? {
|
||||
pattern_list.append(&mut pattern);
|
||||
let pattern = pattern.unwrap_or_else(Vec::new);
|
||||
let target = target.as_ref().map_or_else(|| ".", String::as_str);
|
||||
|
||||
let mut match_list = Vec::new();
|
||||
if let Some(filename) = &files_from {
|
||||
for line in proxmox_backup::tools::file_get_non_comment_lines(filename)? {
|
||||
let line = line
|
||||
.map_err(|err| format_err!("error reading {}: {}", filename, err))?;
|
||||
match_list.push(
|
||||
MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, MatchType::Include)
|
||||
.map_err(|err| format_err!("bad pattern in file '{}': {}", filename, err))?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
for s in arg_pattern {
|
||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
||||
pattern_list.push(p);
|
||||
for entry in pattern {
|
||||
match_list.push(
|
||||
MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include)
|
||||
.map_err(|err| format_err!("error in pattern: {}", err))?,
|
||||
);
|
||||
}
|
||||
|
||||
let pattern = if pattern_list.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(pattern_list)
|
||||
};
|
||||
|
||||
if archive == "-" {
|
||||
let stdin = std::io::stdin();
|
||||
let mut reader = stdin.lock();
|
||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
||||
extract_archive_from_reader(
|
||||
&mut reader,
|
||||
&target,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
verbose,
|
||||
&match_list,
|
||||
)?;
|
||||
} else {
|
||||
if verbose { println!("PXAR extract: {}", archive); }
|
||||
if verbose {
|
||||
println!("PXAR extract: {}", archive);
|
||||
}
|
||||
let file = std::fs::File::open(archive)?;
|
||||
let mut reader = std::io::BufReader::new(file);
|
||||
extract_archive_from_reader(&mut reader, target, feature_flags, allow_existing_dirs, verbose, pattern)?;
|
||||
extract_archive_from_reader(
|
||||
&mut reader,
|
||||
&target,
|
||||
feature_flags,
|
||||
allow_existing_dirs,
|
||||
verbose,
|
||||
&match_list,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
source: {
|
||||
description: "Source directory.",
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-xattrs": {
|
||||
description: "Ignore extended file attributes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fcaps": {
|
||||
description: "Ignore file capabilities.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-acls": {
|
||||
description: "Ignore access control list entries.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"all-file-systems": {
|
||||
description: "Include mounted sudirs.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-device-nodes": {
|
||||
description: "Ignore device nodes.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-fifos": {
|
||||
description: "Ignore fifos.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-sockets": {
|
||||
description: "Ignore sockets.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
exclude: {
|
||||
description: "List of paths or pattern matching files to exclude.",
|
||||
optional: true,
|
||||
type: Array,
|
||||
items: {
|
||||
description: "Path or pattern matching files to restore",
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
"entries-max": {
|
||||
description: "Max number of entries loaded at once into memory",
|
||||
optional: true,
|
||||
default: ENCODER_MAX_ENTRIES as isize,
|
||||
minimum: 0,
|
||||
maximum: std::isize::MAX,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Create a new .pxar archive.
|
||||
fn create_archive(
|
||||
param: Value,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<Value, Error> {
|
||||
archive: String,
|
||||
source: String,
|
||||
verbose: bool,
|
||||
no_xattrs: bool,
|
||||
no_fcaps: bool,
|
||||
no_acls: bool,
|
||||
all_file_systems: bool,
|
||||
no_device_nodes: bool,
|
||||
no_fifos: bool,
|
||||
no_sockets: bool,
|
||||
exclude: Option<Vec<String>>,
|
||||
entries_max: isize,
|
||||
) -> Result<(), Error> {
    let pattern_list = {
        let input = exclude.unwrap_or_else(Vec::new);
        let mut pattern_list = Vec::with_capacity(input.len());
        for entry in input {
            pattern_list.push(
                MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                    .map_err(|err| format_err!("error in exclude pattern: {}", err))?,
            );
        }
        pattern_list
    };
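The block above turns the user supplied exclude strings into match entries. The sketch below shows the same parsing loop factored into a standalone helper; the `pathpatterns` import path is an assumption (elsewhere in this diff only `use pathpatterns::MatchEntry;` is visible):

```rust
use anyhow::{format_err, Error};
// Assumed import path for the match-pattern types used in the diff.
use pathpatterns::{MatchEntry, MatchType, PatternFlag};

/// Turn a list of exclude strings into match entries, mirroring the loop above.
fn build_exclude_list(exclude: Option<Vec<String>>) -> Result<Vec<MatchEntry>, Error> {
    let input = exclude.unwrap_or_else(Vec::new);
    let mut pattern_list = Vec::with_capacity(input.len());
    for entry in input {
        pattern_list.push(
            MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
                .map_err(|err| format_err!("error in exclude pattern: {}", err))?,
        );
    }
    Ok(pattern_list)
}
```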
|
||||
|
||||
let archive = tools::required_string_param(&param, "archive")?;
let source = tools::required_string_param(&param, "source")?;
|
||||
let verbose = param["verbose"].as_bool().unwrap_or(false);
|
||||
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
|
||||
let no_xattrs = param["no-xattrs"].as_bool().unwrap_or(false);
|
||||
let no_fcaps = param["no-fcaps"].as_bool().unwrap_or(false);
|
||||
let no_acls = param["no-acls"].as_bool().unwrap_or(false);
|
||||
let no_device_nodes = param["no-device-nodes"].as_bool().unwrap_or(false);
|
||||
let no_fifos = param["no-fifos"].as_bool().unwrap_or(false);
|
||||
let no_sockets = param["no-sockets"].as_bool().unwrap_or(false);
|
||||
let empty = Vec::new();
|
||||
let exclude_pattern = param["exclude"].as_array().unwrap_or(&empty);
|
||||
let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
|
||||
|
||||
let devices = if all_file_systems { None } else { Some(HashSet::new()) };
|
||||
let device_set = if all_file_systems {
|
||||
None
|
||||
} else {
|
||||
Some(HashSet::new())
|
||||
};
|
||||
|
||||
let source = PathBuf::from(source);
|
||||
|
||||
let mut dir = nix::dir::Dir::open(
|
||||
&source, nix::fcntl::OFlag::O_NOFOLLOW, nix::sys::stat::Mode::empty())?;
|
||||
let dir = nix::dir::Dir::open(
|
||||
&source,
|
||||
nix::fcntl::OFlag::O_NOFOLLOW,
|
||||
nix::sys::stat::Mode::empty(),
|
||||
)?;
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.create_new(true)
|
||||
@ -193,332 +307,150 @@ fn create_archive(
|
||||
.mode(0o640)
|
||||
.open(archive)?;
|
||||
|
||||
let mut writer = std::io::BufWriter::with_capacity(1024*1024, file);
|
||||
let mut feature_flags = pxar::flags::DEFAULT;
|
||||
let writer = std::io::BufWriter::with_capacity(1024 * 1024, file);
|
||||
let mut feature_flags = Flags::DEFAULT;
|
||||
if no_xattrs {
|
||||
feature_flags ^= pxar::flags::WITH_XATTRS;
|
||||
feature_flags ^= Flags::WITH_XATTRS;
|
||||
}
|
||||
if no_fcaps {
|
||||
feature_flags ^= pxar::flags::WITH_FCAPS;
|
||||
feature_flags ^= Flags::WITH_FCAPS;
|
||||
}
|
||||
if no_acls {
|
||||
feature_flags ^= pxar::flags::WITH_ACL;
|
||||
feature_flags ^= Flags::WITH_ACL;
|
||||
}
|
||||
if no_device_nodes {
|
||||
feature_flags ^= pxar::flags::WITH_DEVICE_NODES;
|
||||
feature_flags ^= Flags::WITH_DEVICE_NODES;
|
||||
}
|
||||
if no_fifos {
|
||||
feature_flags ^= pxar::flags::WITH_FIFOS;
|
||||
feature_flags ^= Flags::WITH_FIFOS;
|
||||
}
|
||||
if no_sockets {
|
||||
feature_flags ^= pxar::flags::WITH_SOCKETS;
|
||||
feature_flags ^= Flags::WITH_SOCKETS;
|
||||
}
|
||||
|
||||
let mut pattern_list = Vec::new();
|
||||
for s in exclude_pattern {
|
||||
let l = s.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
|
||||
let p = pxar::MatchPattern::from_line(l.as_bytes())?
|
||||
.ok_or_else(|| format_err!("Invalid match pattern in arguments"))?;
|
||||
pattern_list.push(p);
|
||||
}
|
||||
|
||||
let catalog = None::<&mut pxar::catalog::DummyCatalogWriter>;
|
||||
pxar::Encoder::encode(
|
||||
source,
|
||||
&mut dir,
|
||||
&mut writer,
|
||||
catalog,
|
||||
devices,
|
||||
verbose,
|
||||
false,
|
||||
feature_flags,
|
||||
let writer = pxar::encoder::sync::StandardWriter::new(writer);
|
||||
proxmox_backup::pxar::create_archive(
|
||||
dir,
|
||||
writer,
|
||||
pattern_list,
|
||||
feature_flags,
|
||||
device_set,
|
||||
false,
|
||||
|path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
entries_max as usize,
|
||||
None,
|
||||
)?;
|
||||
|
||||
writer.flush()?;
|
||||
|
||||
Ok(Value::Null)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: { description: "Archive name." },
|
||||
mountpoint: { description: "Mountpoint for the file system." },
|
||||
verbose: {
|
||||
description: "Verbose output, running in the foreground (for debugging).",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Mount the archive to the provided mountpoint via FUSE.
fn mount_archive(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let archive = tools::required_string_param(&param, "archive")?;
    let mountpoint = tools::required_string_param(&param, "mountpoint")?;
    let verbose = param["verbose"].as_bool().unwrap_or(false);
    let no_mt = param["no-mt"].as_bool().unwrap_or(false);

    let archive = Path::new(archive);
    let mountpoint = Path::new(mountpoint);
async fn mount_archive(
    archive: String,
    mountpoint: String,
    verbose: bool,
) -> Result<(), Error> {
    let archive = Path::new(&archive);
    let mountpoint = Path::new(&mountpoint);
    let options = OsStr::new("ro,default_permissions");
    let mut session = pxar::fuse::Session::from_path(&archive, &options, verbose)
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;
    // Mount the session and daemonize if verbose is not set
    session.mount(&mountpoint, !verbose)?;
    session.run_loop(!no_mt)?;

    Ok(Value::Null)
    let session = fuse::Session::mount_path(&archive, &options, verbose, mountpoint)
        .await
        .map_err(|err| format_err!("pxar mount failed: {}", err))?;

    let mut interrupt = signal(SignalKind::interrupt())?;

    select! {
        res = session.fuse() => res?,
        _ = interrupt.recv().fuse() => {
            if verbose {
                eprintln!("interrupted");
            }
        }
    }

    Ok(())
}
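The new `mount_archive` races the FUSE session against an interrupt signal. A generic sketch of that shutdown pattern, written here with `tokio::select!` and `tokio::signal::ctrl_c()` rather than the `futures::select!`/`SignalKind` combination in the code above (the `run_fuse_session` function is purely hypothetical; assumes tokio with the `macros` and `signal` features):

```rust
use anyhow::Error;

// Hypothetical stand-in for the real FUSE session future.
async fn run_fuse_session() -> Result<(), Error> {
    // ... serve the filesystem until it is unmounted ...
    Ok(())
}

async fn mount_until_interrupted(verbose: bool) -> Result<(), Error> {
    tokio::select! {
        // Either the session ends on its own ...
        res = run_fuse_session() => res?,
        // ... or the user hits Ctrl-C and we fall through to a clean exit.
        _ = tokio::signal::ctrl_c() => {
            if verbose {
                eprintln!("interrupted");
            }
        }
    }
    Ok(())
}
```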
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_CREATE_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&create_archive),
|
||||
&ObjectSchema::new(
|
||||
"Create new .pxar archive.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name").schema()
|
||||
),
|
||||
(
|
||||
"source",
|
||||
false,
|
||||
&StringSchema::new("Source directory.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-xattrs",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore extended file attributes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fcaps",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore file capabilities.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-acls",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore access control list entries.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"all-file-systems",
|
||||
true,
|
||||
&BooleanSchema::new("Include mounted sudirs.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-device-nodes",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore device nodes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fifos",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore fifos.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-sockets",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore sockets.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"exclude",
|
||||
true,
|
||||
&ArraySchema::new(
|
||||
"List of paths or pattern matching files to exclude.",
|
||||
&StringSchema::new("Path or pattern matching files to restore.").schema()
|
||||
).schema()
|
||||
),
|
||||
(
|
||||
"entries-max",
|
||||
true,
|
||||
&IntegerSchema::new("Max number of entries loaded at once into memory")
|
||||
.default(pxar::ENCODER_MAX_ENTRIES as isize)
|
||||
.minimum(0)
|
||||
.maximum(std::isize::MAX)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
archive: {
|
||||
description: "Archive name.",
|
||||
},
|
||||
verbose: {
|
||||
description: "Verbose output.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// List the contents of an archive.
|
||||
fn dump_archive(archive: String, verbose: bool) -> Result<(), Error> {
|
||||
for entry in pxar::decoder::Decoder::open(archive)? {
|
||||
let entry = entry?;
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_EXTRACT_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&extract_archive),
|
||||
&ObjectSchema::new(
|
||||
"Extract an archive.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name.").schema()
|
||||
),
|
||||
(
|
||||
"pattern",
|
||||
true,
|
||||
&ArraySchema::new(
|
||||
"List of paths or pattern matching files to restore",
|
||||
&StringSchema::new("Path or pattern matching files to restore.").schema()
|
||||
).schema()
|
||||
),
|
||||
(
|
||||
"target",
|
||||
true,
|
||||
&StringSchema::new("Target directory.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-xattrs",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore extended file attributes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fcaps",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore file capabilities.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-acls",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore access control list entries.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"allow-existing-dirs",
|
||||
true,
|
||||
&BooleanSchema::new("Allows directories to already exist on restore.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"files-from",
|
||||
true,
|
||||
&StringSchema::new("Match pattern for files to restore.").schema()
|
||||
),
|
||||
(
|
||||
"no-device-nodes",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore device nodes.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-fifos",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore fifos.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-sockets",
|
||||
true,
|
||||
&BooleanSchema::new("Ignore sockets.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_MOUNT_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&mount_archive),
|
||||
&ObjectSchema::new(
|
||||
"Mount the archive as filesystem via FUSE.",
|
||||
&sorted!([
|
||||
(
|
||||
"archive",
|
||||
false,
|
||||
&StringSchema::new("Archive name.").schema()
|
||||
),
|
||||
(
|
||||
"mountpoint",
|
||||
false,
|
||||
&StringSchema::new("Mountpoint for the filesystem root.").schema()
|
||||
),
|
||||
(
|
||||
"verbose",
|
||||
true,
|
||||
&BooleanSchema::new("Verbose output, keeps process running in foreground (for debugging).")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
(
|
||||
"no-mt",
|
||||
true,
|
||||
&BooleanSchema::new("Run in single threaded mode (for debugging).")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
]),
|
||||
)
|
||||
);
|
||||
|
||||
#[sortable]
|
||||
const API_METHOD_DUMP_ARCHIVE: ApiMethod = ApiMethod::new(
|
||||
&ApiHandler::Sync(&dump_archive),
|
||||
&ObjectSchema::new(
|
||||
"List the contents of an archive.",
|
||||
&sorted!([
|
||||
( "archive", false, &StringSchema::new("Archive name.").schema()),
|
||||
( "verbose", true, &BooleanSchema::new("Verbose output.")
|
||||
.default(false)
|
||||
.schema()
|
||||
),
|
||||
])
|
||||
)
|
||||
);
|
||||
if verbose {
|
||||
println!("{}", format_single_line_entry(&entry));
|
||||
} else {
|
||||
println!("{:?}", entry.path());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
|
||||
let cmd_def = CliCommandMap::new()
|
||||
.insert("create", CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.arg_param(&["archive", "source"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name)
|
||||
.insert(
|
||||
"create",
|
||||
CliCommand::new(&API_METHOD_CREATE_ARCHIVE)
|
||||
.arg_param(&["archive", "source"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("source", tools::complete_file_name),
|
||||
)
|
||||
.insert("extract", CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.arg_param(&["archive", "target"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name)
|
||||
)
|
||||
.insert("mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.arg_param(&["archive", "mountpoint"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name)
|
||||
.insert(
|
||||
"extract",
|
||||
CliCommand::new(&API_METHOD_EXTRACT_ARCHIVE)
|
||||
.arg_param(&["archive", "target"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("target", tools::complete_file_name)
|
||||
.completion_cb("files-from", tools::complete_file_name),
|
||||
)
|
||||
.insert("list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.arg_param(&["archive"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.insert(
|
||||
"mount",
|
||||
CliCommand::new(&API_METHOD_MOUNT_ARCHIVE)
|
||||
.arg_param(&["archive", "mountpoint"])
|
||||
.completion_cb("archive", tools::complete_file_name)
|
||||
.completion_cb("mountpoint", tools::complete_file_name),
|
||||
)
|
||||
.insert(
|
||||
"list",
|
||||
CliCommand::new(&API_METHOD_DUMP_ARCHIVE)
|
||||
.arg_param(&["archive"])
|
||||
.completion_cb("archive", tools::complete_file_name),
|
||||
);
|
||||
|
||||
let rpcenv = CliEnvironment::new();
|
||||
run_cli_command(cmd_def, rpcenv, None);
|
||||
run_cli_command(cmd_def, rpcenv, Some(|future| {
|
||||
proxmox_backup::tools::runtime::main(future)
|
||||
}));
|
||||
}
|
||||
|
@ -3,11 +3,11 @@
|
||||
//! This library implements the client side to access the backups
|
||||
//! server using https.
|
||||
|
||||
pub mod pipe_to_stream;
|
||||
mod merge_known_chunks;
|
||||
pub mod pipe_to_stream;
|
||||
|
||||
mod http_client;
|
||||
pub use http_client::*;
|
||||
pub use http_client::*;
|
||||
|
||||
mod task_log;
|
||||
pub use task_log::*;
|
||||
@ -24,9 +24,6 @@ pub use remote_chunk_reader::*;
|
||||
mod pxar_backup_stream;
|
||||
pub use pxar_backup_stream::*;
|
||||
|
||||
mod pxar_decode_writer;
|
||||
pub use pxar_decode_writer::*;
|
||||
|
||||
mod backup_repo;
|
||||
pub use backup_repo::*;
|
||||
|
||||
|
@ -91,7 +91,7 @@ impl BackupReader {
|
||||
&self,
|
||||
file_name: &str,
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let path = "download";
|
||||
let param = json!({ "file-name": file_name });
|
||||
self.h2.download(path, Some(param), output).await
|
||||
@ -103,7 +103,7 @@ impl BackupReader {
|
||||
pub async fn speedtest<W: Write + Send>(
|
||||
&self,
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
self.h2.download("speedtest", None, output).await
|
||||
}
|
||||
|
||||
@ -112,7 +112,7 @@ impl BackupReader {
|
||||
&self,
|
||||
digest: &[u8; 32],
|
||||
output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let path = "chunk";
|
||||
let param = json!({ "digest": digest_to_hex(digest) });
|
||||
self.h2.download(path, Some(param), output).await
|
||||
@ -123,17 +123,19 @@ impl BackupReader {
|
||||
}
|
||||
|
||||
/// Download backup manifest (index.json)
|
||||
pub async fn download_manifest(&self) -> Result<BackupManifest, Error> {
|
||||
///
|
||||
/// The manifest signature is verified if we have a crypt_config.
|
||||
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let raw_data = self.download(MANIFEST_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
|
||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
blob.verify_crc()?;
|
||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
let json: Value = serde_json::from_slice(&data[..])?;
|
||||
let data = blob.decode(None)?;
|
||||
|
||||
BackupManifest::try_from(json)
|
||||
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
Ok((manifest, data))
|
||||
}
|
||||
|
||||
/// Download a .blob file
|
||||
@ -146,13 +148,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<DataBlobReader<File>, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let mut tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
||||
manifest.verify_file(name, &csum, size)?;
|
||||
@ -172,13 +174,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<DynamicIndexReader, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
|
||||
@ -200,13 +202,13 @@ impl BackupReader {
|
||||
name: &str,
|
||||
) -> Result<FixedIndexReader, Error> {
|
||||
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let tmpfile = self.download(name, tmpfile).await?;
|
||||
self.download(name, &mut tmpfile).await?;
|
||||
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
|
||||
|
@ -1,8 +1,9 @@
|
||||
use std::collections::HashSet;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use chrono::{DateTime, Utc};
|
||||
use futures::*;
|
||||
use futures::stream::Stream;
|
||||
@ -22,6 +23,7 @@ pub struct BackupWriter {
|
||||
h2: H2Client,
|
||||
abort: AbortHandle,
|
||||
verbose: bool,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
}
|
||||
|
||||
impl Drop for BackupWriter {
|
||||
@ -38,12 +40,13 @@ pub struct BackupStats {
|
||||
|
||||
impl BackupWriter {
|
||||
|
||||
fn new(h2: H2Client, abort: AbortHandle, verbose: bool) -> Arc<Self> {
|
||||
Arc::new(Self { h2, abort, verbose })
|
||||
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
|
||||
Arc::new(Self { h2, abort, crypt_config, verbose })
|
||||
}
|
||||
|
||||
pub async fn start(
|
||||
client: HttpClient,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
datastore: &str,
|
||||
backup_type: &str,
|
||||
backup_id: &str,
|
||||
@ -64,7 +67,7 @@ impl BackupWriter {
|
||||
|
||||
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
|
||||
|
||||
Ok(BackupWriter::new(h2, abort, debug))
|
||||
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
@ -159,19 +162,13 @@ impl BackupWriter {
|
||||
&self,
|
||||
data: Vec<u8>,
|
||||
file_name: &str,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
sign_only: bool,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
let blob = if let Some(ref crypt_config) = crypt_config {
|
||||
if sign_only {
|
||||
DataBlob::create_signed(&data, crypt_config, compress)?
|
||||
} else {
|
||||
DataBlob::encode(&data, Some(crypt_config), compress)?
|
||||
}
|
||||
} else {
|
||||
DataBlob::encode(&data, None, compress)?
|
||||
encrypt: bool,
|
||||
) -> Result<BackupStats, Error> {
|
||||
let blob = match (encrypt, &self.crypt_config) {
|
||||
(false, _) => DataBlob::encode(&data, None, compress)?,
|
||||
(true, None) => bail!("requested encryption without a crypt config"),
|
||||
(true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), compress)?,
|
||||
};
|
||||
|
||||
let raw_data = blob.into_inner();
|
||||
@ -187,9 +184,9 @@ impl BackupWriter {
|
||||
&self,
|
||||
src_path: P,
|
||||
file_name: &str,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
) -> Result<BackupStats, Error> {
|
||||
encrypt: bool,
|
||||
) -> Result<BackupStats, Error> {
|
||||
|
||||
let src_path = src_path.as_ref();
|
||||
|
||||
@ -203,25 +200,18 @@ impl BackupWriter {
|
||||
.await
|
||||
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
|
||||
|
||||
let blob = DataBlob::encode(&contents, crypt_config.as_ref().map(AsRef::as_ref), compress)?;
|
||||
let raw_data = blob.into_inner();
|
||||
let size = raw_data.len() as u64;
|
||||
let csum = openssl::sha::sha256(&raw_data);
|
||||
let param = json!({
|
||||
"encoded-size": size,
|
||||
"file-name": file_name,
|
||||
});
|
||||
self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
|
||||
Ok(BackupStats { size, csum })
|
||||
self.upload_blob_from_data(contents, file_name, compress, encrypt).await
|
||||
}
|
||||
|
||||
pub async fn upload_stream(
|
||||
&self,
|
||||
previous_manifest: Option<Arc<BackupManifest>>,
|
||||
archive_name: &str,
|
||||
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
||||
prefix: &str,
|
||||
fixed_size: Option<u64>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
encrypt: bool,
|
||||
) -> Result<BackupStats, Error> {
|
||||
let known_chunks = Arc::new(Mutex::new(HashSet::new()));
|
||||
|
||||
@ -230,10 +220,25 @@ impl BackupWriter {
|
||||
param["size"] = size.into();
|
||||
}
|
||||
|
||||
if encrypt && self.crypt_config.is_none() {
|
||||
bail!("requested encryption without a crypt config");
|
||||
}
|
||||
|
||||
let index_path = format!("{}_index", prefix);
|
||||
let close_path = format!("{}_close", prefix);
|
||||
|
||||
self.download_chunk_list(&index_path, archive_name, known_chunks.clone()).await?;
|
||||
if let Some(manifest) = previous_manifest {
|
||||
// try, but ignore errors
|
||||
match archive_type(archive_name) {
|
||||
Ok(ArchiveType::FixedIndex) => {
|
||||
let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||
}
|
||||
Ok(ArchiveType::DynamicIndex) => {
|
||||
let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
|
||||
}
|
||||
_ => { /* do nothing */ }
|
||||
}
|
||||
}
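The `previous_manifest` block above seeds `known_chunks` from the last snapshot's indexes so unchanged chunks are only referenced, not re-uploaded. A small self-contained sketch of that dedup idea (types and function names are illustrative, not the crate's API):

```rust
use std::collections::HashSet;

/// Chunk digest type (SHA-256).
type ChunkDigest = [u8; 32];

/// Decide which chunks actually need to be uploaded, given digests already
/// known from a previous index.
fn chunks_to_upload(
    new_chunks: &[(ChunkDigest, Vec<u8>)],
    known_chunks: &mut HashSet<ChunkDigest>,
) -> Vec<ChunkDigest> {
    let mut upload = Vec::new();
    for (digest, _data) in new_chunks {
        // `insert` returns false when the digest was already known; in that
        // case only a reference to the existing chunk has to be registered.
        if known_chunks.insert(*digest) {
            upload.push(*digest);
        }
    }
    upload
}
```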
|
||||
|
||||
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
|
||||
|
||||
@ -244,7 +249,8 @@ impl BackupWriter {
|
||||
stream,
|
||||
&prefix,
|
||||
known_chunks.clone(),
|
||||
crypt_config,
|
||||
if encrypt { self.crypt_config.clone() } else { None },
|
||||
compress,
|
||||
self.verbose,
|
||||
)
|
||||
.await?;
|
||||
@ -374,41 +380,91 @@ impl BackupWriter {
|
||||
(verify_queue_tx, verify_result_rx)
|
||||
}
|
||||
|
||||
pub async fn download_chunk_list(
|
||||
pub async fn download_previous_fixed_index(
|
||||
&self,
|
||||
path: &str,
|
||||
archive_name: &str,
|
||||
manifest: &BackupManifest,
|
||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<(), Error> {
|
||||
) -> Result<FixedIndexReader, Error> {
|
||||
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let param = json!({ "archive-name": archive_name });
|
||||
let request = H2Client::request_builder("localhost", "GET", path, Some(param), None).unwrap();
|
||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
||||
|
||||
let h2request = self.h2.send_request(request, None).await?;
|
||||
let resp = h2request.await?;
|
||||
let index = FixedIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
|
||||
// Note: do not use values stored in index (not trusted) - instead, compute them again
|
||||
let (csum, size) = index.compute_csum();
|
||||
manifest.verify_file(archive_name, &csum, size)?;
|
||||
|
||||
let status = resp.status();
|
||||
|
||||
if !status.is_success() {
|
||||
H2Client::h2api_response(resp).await?; // raise error
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
let mut body = resp.into_body();
|
||||
let mut flow_control = body.flow_control().clone();
|
||||
|
||||
let mut stream = DigestListDecoder::new(body.map_err(Error::from));
|
||||
|
||||
while let Some(chunk) = stream.try_next().await? {
|
||||
let _ = flow_control.release_capacity(chunk.len());
|
||||
known_chunks.lock().unwrap().insert(chunk);
|
||||
// add index chunks to known chunks
|
||||
let mut known_chunks = known_chunks.lock().unwrap();
|
||||
for i in 0..index.index_count() {
|
||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||
}
|
||||
|
||||
if self.verbose {
|
||||
println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
|
||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
pub async fn download_previous_dynamic_index(
|
||||
&self,
|
||||
archive_name: &str,
|
||||
manifest: &BackupManifest,
|
||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
) -> Result<DynamicIndexReader, Error> {
|
||||
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.read(true)
|
||||
.custom_flags(libc::O_TMPFILE)
|
||||
.open("/tmp")?;
|
||||
|
||||
let param = json!({ "archive-name": archive_name });
|
||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
||||
|
||||
let index = DynamicIndexReader::new(tmpfile)
|
||||
.map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
|
||||
// Note: do not use values stored in index (not trusted) - instead, compute them again
|
||||
let (csum, size) = index.compute_csum();
|
||||
manifest.verify_file(archive_name, &csum, size)?;
|
||||
|
||||
// add index chunks to known chunks
|
||||
let mut known_chunks = known_chunks.lock().unwrap();
|
||||
for i in 0..index.index_count() {
|
||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||
}
|
||||
|
||||
if self.verbose {
|
||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
||||
}
|
||||
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
/// Download backup manifest (index.json) of last backup
|
||||
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
||||
|
||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||
|
||||
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
||||
self.h2.download("previous", Some(param), &mut raw_data).await?;
|
||||
|
||||
let blob = DataBlob::from_raw(raw_data)?;
|
||||
blob.verify_crc()?;
|
||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
Ok(manifest)
|
||||
}
|
||||
|
||||
fn upload_chunk_info_stream(
|
||||
@ -418,6 +474,7 @@ impl BackupWriter {
|
||||
prefix: &str,
|
||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
compress: bool,
|
||||
verbose: bool,
|
||||
) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {
|
||||
|
||||
@ -448,7 +505,7 @@ impl BackupWriter {
|
||||
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
||||
|
||||
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
||||
.compress(true);
|
||||
.compress(compress);
|
||||
|
||||
if let Some(ref crypt_config) = crypt_config {
|
||||
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
||||
|
@ -466,7 +466,7 @@ impl HttpClient {
|
||||
&mut self,
|
||||
path: &str,
|
||||
output: &mut (dyn Write + Send),
|
||||
) -> Result<(), Error> {
|
||||
) -> Result<(), Error> {
|
||||
let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap();
|
||||
|
||||
let client = self.client.clone();
|
||||
@ -707,7 +707,7 @@ impl H2Client {
|
||||
path: &str,
|
||||
param: Option<Value>,
|
||||
mut output: W,
|
||||
) -> Result<W, Error> {
|
||||
) -> Result<(), Error> {
|
||||
let request = Self::request_builder("localhost", "GET", path, param, None).unwrap();
|
||||
|
||||
let response_future = self.send_request(request, None).await?;
|
||||
@ -727,7 +727,7 @@ impl H2Client {
|
||||
output.write_all(&chunk)?;
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn upload(
|
||||
|
@ -34,7 +34,7 @@ async fn pull_index_chunks<I: IndexFile>(
|
||||
continue;
|
||||
}
|
||||
//worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
|
||||
let chunk = chunk_reader.read_raw_chunk(&digest)?;
|
||||
let chunk = chunk_reader.read_raw_chunk(&digest).await?;
|
||||
|
||||
target.insert_chunk(&chunk, &digest)?;
|
||||
}
|
||||
@ -47,13 +47,13 @@ async fn download_manifest(
|
||||
filename: &std::path::Path,
|
||||
) -> Result<std::fs::File, Error> {
|
||||
|
||||
let tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
let mut tmp_manifest_file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.read(true)
|
||||
.open(&filename)?;
|
||||
|
||||
let mut tmp_manifest_file = reader.download(MANIFEST_BLOB_NAME, tmp_manifest_file).await?;
|
||||
reader.download(MANIFEST_BLOB_NAME, &mut tmp_manifest_file).await?;
|
||||
|
||||
tmp_manifest_file.seek(SeekFrom::Start(0))?;
|
||||
|
||||
@ -77,13 +77,13 @@ async fn pull_single_archive(
|
||||
tmp_path.set_extension("tmp");
|
||||
|
||||
worker.log(format!("sync archive {}", archive_name));
|
||||
let tmpfile = std::fs::OpenOptions::new()
|
||||
let mut tmpfile = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.read(true)
|
||||
.open(&tmp_path)?;
|
||||
|
||||
let tmpfile = reader.download(archive_name, tmpfile).await?;
|
||||
reader.download(archive_name, &mut tmpfile).await?;
|
||||
|
||||
match archive_type(archive_name)? {
|
||||
ArchiveType::DynamicIndex => {
|
||||
@ -124,7 +124,7 @@ async fn try_client_log_download(
|
||||
.open(&tmp_path)?;
|
||||
|
||||
// Note: be silent if there is no log - only log successful download
|
||||
if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
|
||||
if let Ok(()) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
|
||||
if let Err(err) = std::fs::rename(&tmp_path, &path) {
|
||||
bail!("Atomic rename file {:?} failed - {}", path, err);
|
||||
}
|
||||
|
@ -9,12 +9,12 @@ use std::thread;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::stream::Stream;
|
||||
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
use nix::dir::Dir;
|
||||
|
||||
use crate::pxar;
|
||||
use pathpatterns::MatchEntry;
|
||||
|
||||
use crate::backup::CatalogWriter;
|
||||
|
||||
/// Stream implementation to encode and upload .pxar archives.
|
||||
@ -29,7 +29,6 @@ pub struct PxarBackupStream {
|
||||
}
|
||||
|
||||
impl Drop for PxarBackupStream {
|
||||
|
||||
fn drop(&mut self) {
|
||||
self.rx = None;
|
||||
self.child.take().unwrap().join().unwrap();
|
||||
@ -37,46 +36,49 @@ impl Drop for PxarBackupStream {
|
||||
}
|
||||
|
||||
impl PxarBackupStream {
|
||||
|
||||
pub fn new<W: Write + Send + 'static>(
|
||||
mut dir: Dir,
|
||||
path: PathBuf,
|
||||
dir: Dir,
|
||||
_path: PathBuf,
|
||||
device_set: Option<HashSet<u64>>,
|
||||
verbose: bool,
|
||||
_verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
catalog: Arc<Mutex<CatalogWriter<W>>>,
|
||||
exclude_pattern: Vec<pxar::MatchPattern>,
|
||||
patterns: Vec<MatchEntry>,
|
||||
entries_max: usize,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::sync_channel(10);
|
||||
|
||||
let buffer_size = 256*1024;
|
||||
let buffer_size = 256 * 1024;
|
||||
|
||||
let error = Arc::new(Mutex::new(None));
|
||||
let error2 = error.clone();
|
||||
let child = std::thread::Builder::new()
|
||||
.name("PxarBackupStream".to_string())
|
||||
.spawn({
|
||||
let error = Arc::clone(&error);
|
||||
move || {
|
||||
let mut catalog_guard = catalog.lock().unwrap();
|
||||
let writer = std::io::BufWriter::with_capacity(
|
||||
buffer_size,
|
||||
crate::tools::StdChannelWriter::new(tx),
|
||||
);
|
||||
|
||||
let catalog = catalog.clone();
|
||||
let child = std::thread::Builder::new().name("PxarBackupStream".to_string()).spawn(move || {
|
||||
let mut guard = catalog.lock().unwrap();
|
||||
let mut writer = std::io::BufWriter::with_capacity(buffer_size, crate::tools::StdChannelWriter::new(tx));
|
||||
|
||||
if let Err(err) = pxar::Encoder::encode(
|
||||
path,
|
||||
&mut dir,
|
||||
&mut writer,
|
||||
Some(&mut *guard),
|
||||
device_set,
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
pxar::flags::DEFAULT,
|
||||
exclude_pattern,
|
||||
entries_max,
|
||||
) {
|
||||
let mut error = error2.lock().unwrap();
|
||||
*error = Some(err.to_string());
|
||||
}
|
||||
})?;
|
||||
let writer = pxar::encoder::sync::StandardWriter::new(writer);
|
||||
if let Err(err) = crate::pxar::create_archive(
|
||||
dir,
|
||||
writer,
|
||||
patterns,
|
||||
crate::pxar::Flags::DEFAULT,
|
||||
device_set,
|
||||
skip_lost_and_found,
|
||||
|_| Ok(()),
|
||||
entries_max,
|
||||
Some(&mut *catalog_guard),
|
||||
) {
|
||||
let mut error = error.lock().unwrap();
|
||||
*error = Some(err.to_string());
|
||||
}
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
rx: Some(rx),
|
||||
@ -91,23 +93,31 @@ impl PxarBackupStream {
|
||||
verbose: bool,
|
||||
skip_lost_and_found: bool,
|
||||
catalog: Arc<Mutex<CatalogWriter<W>>>,
|
||||
exclude_pattern: Vec<pxar::MatchPattern>,
|
||||
patterns: Vec<MatchEntry>,
|
||||
entries_max: usize,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
|
||||
let path = std::path::PathBuf::from(dirname);
|
||||
|
||||
Self::new(dir, path, device_set, verbose, skip_lost_and_found, catalog, exclude_pattern, entries_max)
|
||||
Self::new(
|
||||
dir,
|
||||
path,
|
||||
device_set,
|
||||
verbose,
|
||||
skip_lost_and_found,
|
||||
catalog,
|
||||
patterns,
|
||||
entries_max,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for PxarBackupStream {
|
||||
|
||||
type Item = Result<Vec<u8>, Error>;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
{ // limit lock scope
|
||||
{
|
||||
// limit lock scope
|
||||
let error = self.error.lock().unwrap();
|
||||
if let Some(ref msg) = *error {
|
||||
return Poll::Ready(Some(Err(format_err!("{}", msg))));
|
||||
|
@ -1,70 +0,0 @@
|
||||
use anyhow::{Error};
|
||||
|
||||
use std::thread;
|
||||
use std::os::unix::io::FromRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::io::Write;
|
||||
|
||||
use crate::pxar;
|
||||
|
||||
/// Writer implementation to decode a .pxar archive (download).
|
||||
|
||||
pub struct PxarDecodeWriter {
|
||||
pipe: Option<std::fs::File>,
|
||||
child: Option<thread::JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Drop for PxarDecodeWriter {
|
||||
|
||||
fn drop(&mut self) {
|
||||
drop(self.pipe.take());
|
||||
self.child.take().unwrap().join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl PxarDecodeWriter {
|
||||
|
||||
pub fn new(base: &Path, verbose: bool) -> Result<Self, Error> {
|
||||
let (rx, tx) = nix::unistd::pipe()?;
|
||||
|
||||
let base = PathBuf::from(base);
|
||||
|
||||
let child = thread::spawn(move|| {
|
||||
let mut reader = unsafe { std::fs::File::from_raw_fd(rx) };
|
||||
let mut decoder = pxar::SequentialDecoder::new(&mut reader, pxar::flags::DEFAULT);
|
||||
decoder.set_callback(move |path| {
|
||||
if verbose {
|
||||
println!("{:?}", path);
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
|
||||
if let Err(err) = decoder.restore(&base, &Vec::new()) {
|
||||
eprintln!("pxar decode failed - {}", err);
|
||||
}
|
||||
});
|
||||
|
||||
let pipe = unsafe { std::fs::File::from_raw_fd(tx) };
|
||||
|
||||
Ok(Self { pipe: Some(pipe), child: Some(child) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for PxarDecodeWriter {
|
||||
|
||||
fn write(&mut self, buffer: &[u8]) -> Result<usize, std::io::Error> {
|
||||
let pipe = match self.pipe {
|
||||
Some(ref mut pipe) => pipe,
|
||||
None => unreachable!(),
|
||||
};
|
||||
pipe.write(buffer)
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
let pipe = match self.pipe {
|
||||
Some(ref mut pipe) => pipe,
|
||||
None => unreachable!(),
|
||||
};
|
||||
pipe.flush()
|
||||
}
|
||||
}
|
@ -1,22 +1,24 @@
|
||||
use std::future::Future;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::{Error};
|
||||
use anyhow::Error;
|
||||
|
||||
use super::BackupReader;
|
||||
use crate::backup::{ReadChunk, DataBlob, CryptConfig};
|
||||
use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
|
||||
use crate::tools::runtime::block_on;
|
||||
|
||||
/// Read chunks from remote host using ``BackupReader``
|
||||
#[derive(Clone)]
|
||||
pub struct RemoteChunkReader {
|
||||
client: Arc<BackupReader>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
cache_hint: HashMap<[u8; 32], usize>,
|
||||
cache: HashMap<[u8; 32], Vec<u8>>,
|
||||
cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
|
||||
}
|
||||
|
||||
impl RemoteChunkReader {
|
||||
|
||||
/// Create a new instance.
|
||||
///
|
||||
/// Chunks listed in ``cache_hint`` are cached and kept in RAM.
|
||||
@ -25,50 +27,82 @@ impl RemoteChunkReader {
        crypt_config: Option<Arc<CryptConfig>>,
        cache_hint: HashMap<[u8; 32], usize>,
    ) -> Self {

        Self { client, crypt_config, cache_hint, cache: HashMap::new() }
        Self {
            client,
            crypt_config,
            cache_hint,
            cache: Arc::new(Mutex::new(HashMap::new())),
        }
    }
}
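The cache field changes from a plain `HashMap` to an `Arc<Mutex<HashMap>>` so that cloned readers (the struct now derives `Clone`) share one chunk cache. A minimal sketch of that shared-cache pattern in plain std Rust:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Clone)]
struct SharedCache {
    // All clones point at the same map, so a chunk fetched through one
    // clone is visible to every other clone.
    inner: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
}

impl SharedCache {
    fn new() -> Self {
        SharedCache { inner: Arc::new(Mutex::new(HashMap::new())) }
    }

    fn get_or_insert_with(&self, digest: [u8; 32], fetch: impl FnOnce() -> Vec<u8>) -> Vec<u8> {
        let mut map = self.inner.lock().unwrap();
        map.entry(digest).or_insert_with(fetch).clone()
    }
}

fn main() {
    let a = SharedCache::new();
    let b = a.clone();
    a.get_or_insert_with([0u8; 32], || vec![1u8, 2, 3]);
    // The clone sees the entry inserted through `a`.
    assert_eq!(b.get_or_insert_with([0u8; 32], || vec![]), vec![1u8, 2, 3]);
}
```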
|
||||
|
||||
impl ReadChunk for RemoteChunkReader {
|
||||
pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);
|
||||
|
||||
fn read_raw_chunk(&mut self, digest:&[u8; 32]) -> Result<DataBlob, Error> {
|
||||
|
||||
let mut chunk_data = Vec::with_capacity(4*1024*1024);
|
||||
|
||||
//tokio::task::block_in_place(|| futures::executor::block_on(self.client.download_chunk(&digest, &mut chunk_data)))?;
|
||||
block_on(async {
|
||||
// download_chunk returns the writer back to us, but we need to return a 'static value
|
||||
self.client
|
||||
.download_chunk(&digest, &mut chunk_data)
|
||||
.await
|
||||
.map(drop)
|
||||
})?;
|
||||
self.client
|
||||
.download_chunk(&digest, &mut chunk_data)
|
||||
.await?;
|
||||
|
||||
let chunk = DataBlob::from_raw(chunk_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
Ok(chunk)
|
||||
}
|
||||
}
|
||||
|
||||
fn read_chunk(&mut self, digest:&[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
impl ReadChunk for RemoteChunkReader {
|
||||
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
block_on(Self::read_raw_chunk(self, digest))
|
||||
}
|
||||
|
||||
if let Some(raw_data) = self.cache.get(digest) {
|
||||
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
|
||||
return Ok(raw_data.to_vec());
|
||||
}
|
||||
|
||||
let chunk = self.read_raw_chunk(digest)?;
|
||||
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
let use_cache = self.cache_hint.contains_key(digest);
|
||||
if use_cache {
|
||||
self.cache.insert(*digest, raw_data.to_vec());
|
||||
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
|
||||
}
|
||||
|
||||
Ok(raw_data)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
impl AsyncReadChunk for RemoteChunkReader {
|
||||
fn read_raw_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
|
||||
Box::pin(Self::read_raw_chunk(self, digest))
|
||||
}
|
||||
|
||||
fn read_chunk<'a>(
|
||||
&'a self,
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, Error>> + Send + 'a>> {
|
||||
Box::pin(async move {
|
||||
if let Some(raw_data) = (*self.cache.lock().unwrap()).get(digest) {
|
||||
return Ok(raw_data.to_vec());
|
||||
}
|
||||
|
||||
let chunk = Self::read_raw_chunk(self, digest).await?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
let use_cache = self.cache_hint.contains_key(digest);
|
||||
if use_cache {
|
||||
(*self.cache.lock().unwrap()).insert(*digest, raw_data.to_vec());
|
||||
}
|
||||
|
||||
Ok(raw_data)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -190,7 +190,7 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
|
||||
"system" => {
|
||||
if components_len == 1 { return Ok(()); }
|
||||
match components[1] {
|
||||
"log" | "status" | "tasks" | "time" => {
|
||||
"disks" | "log" | "status" | "tasks" | "time" => {
|
||||
if components_len == 2 { return Ok(()); }
|
||||
}
|
||||
"services" => { // /system/services/{service}
|
||||
|
@ -141,7 +141,7 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {
|
||||
|
||||
pub fn compute_file_diff(filename: &str, shadow: &str) -> Result<String, Error> {
|
||||
|
||||
let output = Command::new("/usr/bin/diff")
|
||||
let output = Command::new("diff")
|
||||
.arg("-b")
|
||||
.arg("-u")
|
||||
.arg(filename)
|
||||
@ -165,10 +165,10 @@ pub fn assert_ifupdown2_installed() -> Result<(), Error> {
|
||||
|
||||
pub fn network_reload() -> Result<(), Error> {
|
||||
|
||||
let output = Command::new("/sbin/ifreload")
|
||||
let output = Command::new("ifreload")
|
||||
.arg("-a")
|
||||
.output()
|
||||
.map_err(|err| format_err!("failed to execute '/sbin/ifreload' - {}", err))?;
|
||||
.map_err(|err| format_err!("failed to execute 'ifreload' - {}", err))?;
|
||||
|
||||
crate::tools::command_output(output, None)
|
||||
.map_err(|err| format_err!("ifreload failed: {}", err))?;
|
||||
|
@ -1,229 +0,0 @@
|
||||
//! Helpers to generate a binary search tree stored in an array from a
|
||||
//! sorted array.
|
||||
//!
|
||||
//! Specifically, for any given sorted array 'input' permute the
|
||||
//! array so that the following rule holds:
|
||||
//!
|
||||
//! For each array item with index i, the item at 2i+1 is smaller and
|
||||
//! the item 2i+2 is larger.
|
||||
//!
|
||||
//! This structure permits efficient (meaning: O(log(n))) binary
|
||||
//! searches: start with item i=0 (i.e. the root of the BST), compare
|
||||
//! the value with the searched item, if smaller proceed at item
|
||||
//! 2i+1, if larger proceed at item 2i+2, and repeat, until either
|
||||
//! the item is found, or the indexes grow beyond the array size,
|
||||
//! which means the entry does not exist.
|
||||
//!
|
||||
//! Effectively this implements bisection, but instead of jumping
|
||||
//! around wildly in the array during a single search we only search
|
||||
//! with strictly monotonically increasing indexes.
|
||||
//!
|
||||
//! Algorithm is from casync (camakebst.c), simplified and optimized
|
||||
//! for rust. Permutation function originally by L. Bressel, 2017. We
|
||||
//! pass permutation info to user provided callback, which actually
|
||||
//! implements the data copy.
|
||||
//!
|
||||
//! The Wikipedia article on the [Binary
//! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
//! intro on how to store binary trees using an array.
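A compact iterative sketch of the lookup rule described above (walk to `2*i+1` when the needle is smaller, to `2*i+2` when it is larger); the module's own `search_binary_tree_by` below is the full version with duplicate handling, which this sketch omits:

```rust
use std::cmp::Ordering;

/// Look up `needle` in an array laid out as described above:
/// for index i, the element at 2*i+1 is smaller and 2*i+2 is larger.
fn lookup_in_bst_array<T: Ord>(tree: &[T], needle: &T) -> Option<usize> {
    let mut i = 0;
    while i < tree.len() {
        match needle.cmp(&tree[i]) {
            Ordering::Equal => return Some(i),
            Ordering::Less => i = 2 * i + 1,
            Ordering::Greater => i = 2 * i + 2,
        }
    }
    None
}

fn main() {
    // Permutation of the sorted array [0,1,2,3,4] as produced by
    // copy_binary_search_tree (see the doc example further down): [3,1,4,0,2].
    let tree = [3, 1, 4, 0, 2];
    assert_eq!(lookup_in_bst_array(&tree, &4), Some(2));
    assert_eq!(lookup_in_bst_array(&tree, &7), None);
}
```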
|
||||
|
||||
use std::cmp::Ordering;
|
||||
|
||||
#[allow(clippy::many_single_char_names)]
|
||||
fn copy_binary_search_tree_inner<F: FnMut(usize, usize)>(
|
||||
copy_func: &mut F,
|
||||
// we work on input array input[o..o+n]
|
||||
n: usize,
|
||||
o: usize,
|
||||
e: usize,
|
||||
i: usize,
|
||||
) {
|
||||
let p = 1 << e;
|
||||
|
||||
let t = p + (p>>1) - 1;
|
||||
|
||||
let m = if n > t {
|
||||
// |...........p.............t....n........(2p)|
|
||||
p - 1
|
||||
} else {
|
||||
// |...........p.....n.......t.............(2p)|
|
||||
p - 1 - (t-n)
|
||||
};
|
||||
|
||||
(copy_func)(o+m, i);
|
||||
|
||||
if m > 0 {
|
||||
copy_binary_search_tree_inner(copy_func, m, o, e-1, i*2+1);
|
||||
}
|
||||
|
||||
if (m + 1) < n {
|
||||
copy_binary_search_tree_inner(copy_func, n-m-1, o+m+1, e-1, i*2+2);
|
||||
}
|
||||
}
|
||||
|
||||
/// This function calls the provided `copy_func()` with the permutation
|
||||
/// info.
|
||||
///
|
||||
/// ```
|
||||
/// # use proxmox_backup::pxar::copy_binary_search_tree;
|
||||
/// copy_binary_search_tree(5, |src, dest| {
|
||||
/// println!("Copy {} to {}", src, dest);
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// This will produce the following output:
|
||||
///
|
||||
/// ```no-compile
|
||||
/// Copy 3 to 0
|
||||
/// Copy 1 to 1
|
||||
/// Copy 0 to 3
|
||||
/// Copy 2 to 4
|
||||
/// Copy 4 to 2
|
||||
/// ```
|
||||
///
|
||||
/// So this generates the following permutation: `[3,1,4,0,2]`.
|
||||
|
||||
pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
|
||||
n: usize,
|
||||
mut copy_func: F,
|
||||
) {
|
||||
if n == 0 { return };
|
||||
let e = (64 - n.leading_zeros() - 1) as usize; // fast log2(n)
|
||||
|
||||
copy_binary_search_tree_inner(&mut copy_func, n, 0, e, 0);
|
||||
}
|
||||
|
||||
|
||||
/// This function searches for the index where the comparison by the provided
|
||||
/// `compare()` function returns `Ordering::Equal`.
|
||||
/// The order of the comparison matters (noncommutative) and should be search
|
||||
/// value compared to value at given index as shown in the examples.
|
||||
/// The parameter `skip_multiples` defines the number of matches to ignore while
|
||||
/// searching before returning the index in order to lookup duplicate entries in
|
||||
/// the tree.
|
||||
///
|
||||
/// ```
|
||||
/// # use proxmox_backup::pxar::{copy_binary_search_tree, search_binary_tree_by};
|
||||
/// let mut vals = vec![0,1,2,2,2,3,4,5,6,6,7,8,8,8];
|
||||
///
|
||||
/// let clone = vals.clone();
|
||||
/// copy_binary_search_tree(vals.len(), |s, d| {
|
||||
/// vals[d] = clone[s];
|
||||
/// });
|
||||
/// let should_be = vec![5,2,8,1,3,6,8,0,2,2,4,6,7,8];
|
||||
/// assert_eq!(vals, should_be);
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 0;
|
||||
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(2));
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(2, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(6));
|
||||
///
|
||||
/// let find = 8;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(6, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert_eq!(idx, Some(13));
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 1;
|
||||
/// let idx = search_binary_tree_by(0, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 0;
|
||||
/// // if start index is equal to the array length, `None` is returned.
|
||||
/// let idx = search_binary_tree_by(vals.len(), vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
///
|
||||
/// let find = 5;
|
||||
/// let skip_multiples = 0;
|
||||
/// // if start index is larger than length, `None` is returned.
|
||||
/// let idx = search_binary_tree_by(vals.len() + 1, vals.len(), skip_multiples, |idx| find.cmp(&vals[idx]));
|
||||
/// assert!(idx.is_none());
|
||||
/// ```
|
||||
|
||||
pub fn search_binary_tree_by<F: Copy + Fn(usize) -> Ordering>(
|
||||
start: usize,
|
||||
size: usize,
|
||||
skip_multiples: usize,
|
||||
compare: F
|
||||
) -> Option<usize> {
|
||||
if start >= size {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut skip = skip_multiples;
|
||||
let cmp = compare(start);
|
||||
if cmp == Ordering::Equal {
|
||||
if skip == 0 {
|
||||
// Found matching hash and want this one
|
||||
return Some(start);
|
||||
}
|
||||
// Found matching hash, but we should skip the first `skip_multiple`,
|
||||
// so continue search with reduced skip count.
|
||||
skip -= 1;
|
||||
}
|
||||
|
||||
if cmp == Ordering::Less || cmp == Ordering::Equal {
|
||||
let res = search_binary_tree_by(2 * start + 1, size, skip, compare);
|
||||
if res.is_some() {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
if cmp == Ordering::Greater || cmp == Ordering::Equal {
|
||||
let res = search_binary_tree_by(2 * start + 2, size, skip, compare);
|
||||
if res.is_some() {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_search_tree() {
|
||||
|
||||
fn run_test(len: usize) -> Vec<usize> {
|
||||
|
||||
const MARKER: usize = 0xfffffff;
|
||||
let mut output = vec![];
|
||||
for _i in 0..len { output.push(MARKER); }
|
||||
copy_binary_search_tree(len, |s, d| {
|
||||
assert!(output[d] == MARKER);
|
||||
output[d] = s;
|
||||
});
|
||||
if len < 32 { println!("GOT:{}:{:?}", len, output); }
|
||||
for i in 0..len {
|
||||
assert!(output[i] != MARKER);
|
||||
}
|
||||
output
|
||||
}
|
||||
|
||||
assert!(run_test(0).len() == 0);
|
||||
assert!(run_test(1) == [0]);
|
||||
assert!(run_test(2) == [1,0]);
|
||||
assert!(run_test(3) == [1,0,2]);
|
||||
assert!(run_test(4) == [2,1,3,0]);
|
||||
assert!(run_test(5) == [3,1,4,0,2]);
|
||||
assert!(run_test(6) == [3,1,5,0,2,4]);
|
||||
assert!(run_test(7) == [3,1,5,0,2,4,6]);
|
||||
assert!(run_test(8) == [4,2,6,1,3,5,7,0]);
|
||||
assert!(run_test(9) == [5,3,7,1,4,6,8,0,2]);
|
||||
assert!(run_test(10) == [6,3,8,1,5,7,9,0,2,4]);
|
||||
assert!(run_test(11) == [7,3,9,1,5,8,10,0,2,4,6]);
|
||||
assert!(run_test(12) == [7,3,10,1,5,9,11,0,2,4,6,8]);
|
||||
assert!(run_test(13) == [7,3,11,1,5,9,12,0,2,4,6,8,10]);
|
||||
assert!(run_test(14) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12]);
|
||||
assert!(run_test(15) == [7,3,11,1,5,9,13,0,2,4,6,8,10,12,14]);
|
||||
assert!(run_test(16) == [8,4,12,2,6,10,14,1,3,5,7,9,11,13,15,0]);
|
||||
assert!(run_test(17) == [9,5,13,3,7,11,15,1,4,6,8,10,12,14,16,0,2]);
|
||||
|
||||
for len in 18..1000 {
|
||||
run_test(len);
|
||||
}
|
||||
}
|
src/pxar/create.rs: new file, 1006 lines (diff suppressed because it is too large). Some files were not shown because too many files have changed in this diff.