Compare commits
336 Commits
(Commit table: 336 abbreviated SHA1 hashes, from 97cd0a2a6d through 4e717240bf; the author, message, and date columns were empty in this export.)
.gitignore (vendored) | 1

@@ -3,3 +3,4 @@ local.mak
 **/*.rs.bk
 /etc/proxmox-backup.service
 /etc/proxmox-backup-proxy.service
+build/
Cargo.toml | 15

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.5.0"
+version = "0.8.13"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,6 +14,7 @@ name = "proxmox_backup"
 path = "src/lib.rs"
 
 [dependencies]
+apt-pkg-native = "0.3.1" # custom patched version
 base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "0.5"
@@ -37,12 +38,12 @@ pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-pathpatterns = "0.1.1"
-proxmox = { version = "0.1.41", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.2"
+proxmox = { version = "0.3.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
@@ -50,11 +51,11 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
+tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
 tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
-udev = "0.3"
+udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
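As a sanity check on the dependency bumps above, the resolved crate graph can be compared against the manifest from a checkout of this branch. A minimal sketch, assuming cargo 1.44 or newer (which provides `cargo tree`):

    # Show direct dependencies and confirm the bumped versions resolved:
    # proxmox 0.3.x, pxar 0.6.x, pathpatterns 0.1.2, and udev within >=0.3,<0.5.
    cargo tree --package proxmox-backup --depth 1 | grep -E 'proxmox|pxar|pathpatterns|udev'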
Makefile | 23

@@ -37,11 +37,15 @@ CARGO ?= cargo
 COMPILED_BINS := \
     $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
 
+export DEB_VERSION DEB_VERSION_UPSTREAM
+
 SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
 
-DEBS=${SERVER_DEB} ${CLIENT_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
 
 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
 
@@ -65,10 +69,12 @@ doc:
 .PHONY: build
 build:
     rm -rf build
+    rm -f debian/control
     debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
     sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
     cat build/debian/control.src build/debian/control.in > build/debian/control
     rm build/debian/control.in build/debian/control.src
+    cp build/debian/control debian/control
     rm build/Cargo.lock
     find build/debian -name "*.hint" -delete
     $(foreach i,$(SUBDIRS), \
@@ -76,18 +82,21 @@ build:
 
 .PHONY: proxmox-backup-docs
-proxmox-backup-docs: $(DOC_DEB)
-$(DOC_DEB): build
+$(DOC_DEB) $(DEBS): proxmox-backup-docs
+proxmox-backup-docs: build
     cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
     lintian $(DOC_DEB)
 
 # copy the local target/ dir as a build-cache
 .PHONY: deb
-deb: $(DEBS)
-$(DEBS): build
+$(DEBS): deb
+deb: build
     cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
     lintian $(DEBS)
 
+.PHONY: deb-all
+deb-all: $(DOC_DEB) $(DEBS)
+
 .PHONY: dsc
 dsc: $(DSC)
 $(DSC): build
@@ -140,5 +149,5 @@ install: $(COMPILED_BINS)
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
-    tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-    tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
+    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
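The packaging targets now carry the debug-symbol packages through build and upload. A hedged sketch of verifying the new artifacts; the exact filenames depend on DEB_VERSION and ARCH, here assumed to resolve to 0.8.13-1 and amd64 per the SERVER_DBG_DEB/CLIENT_DBG_DEB patterns above:

    # Build server, client and docs packages, then check that the dbgsym
    # companions landed next to the regular debs in the repository root:
    make deb-all
    ls proxmox-backup-server-dbgsym_0.8.13-1_amd64.deb \
       proxmox-backup-client-dbgsym_0.8.13-1_amd64.deb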
debian/changelog (vendored) | 294

@@ -1,3 +1,297 @@
+rust-proxmox-backup (0.8.13-1) unstable; urgency=medium
+
+  * improve and add to documentation
+
+  * save last verify result in snapshot manifest and show it in the GUI
+
+  * gc: use human readable units for summary in task log
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 27 Aug 2020 16:12:07 +0200
+
+rust-proxmox-backup (0.8.12-1) unstable; urgency=medium
+
+  * verify: speedup - only verify chunks once
+
+  * verify: sort backup groups
+
+  * bump pxar dep to 0.4.0
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 25 Aug 2020 08:55:52 +0200
+
+rust-proxmox-backup (0.8.11-1) unstable; urgency=medium
+
+  * improve sync jobs, allow to stop them and better logging
+
+  * fix #2926: make network interfaces parser more flexible
+
+  * fix #2904: zpool status: parse also those vdevs without READ/ẀRITE/...
+    statistics
+
+  * api2/node/services: turn service api calls into workers
+
+  * docs: add sections describing ACL related commands and describing
+    benchmarking
+
+  * docs: general grammar, wording and typo improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 19 Aug 2020 19:20:03 +0200
+
+rust-proxmox-backup (0.8.10-1) unstable; urgency=medium
+
+  * ui: acl: add improved permission selector
+
+  * services: make reload safer and default to it in gui
+
+  * ui: rework DataStore content Panel
+
+  * ui: add search box to DataStore content
+
+  * ui: DataStoreContent: keep selection and expansion on reload
+
+  * upload_chunk: allow upload of empty blobs
+
+  * fix #2856: also check whole device for device mapper
+
+  * ui: fix error when reloading DataStoreContent
+
+  * ui: fix in-progress snapshots always showing as "Encrypted"
+
+  * update to pxar 0.3 to support negative timestamps
+
+  * fix #2873: if --pattern is used, default to not extracting
+
+  * finish_backup: test/verify manifest at server side
+
+  * finish_backup: add chunk_upload_stats to manifest
+
+  * src/api2/admin/datastore.rs: add API to get/set Notes for backus
+
+  * list_snapshots: Returns new "comment" property (first line from notes)
+
+  * pxar: create: attempt to use O_NOATIME
+
+  * systemd/time: fix weekday wrapping on month
+
+  * pxar: better error handling on extract
+
+  * pxar/extract: fixup path stack for errors
+
+  * datastore: allow browsing signed pxar files
+
+  * GC: use time pre phase1 to calculate min_atime in phase2
+
+  * gui: user: fix #2898 add dialog to set password
+
+  * fix #2909: handle missing chunks gracefully in garbage collection
+
+  * finish_backup: mark backup as finished only after checks have passed
+
+  * fix: master-key: upload RSA encoded key with backup
+
+  * admin-guide: add section explaining master keys
+
+  * backup: only allow finished backups as base snapshot
+
+  * datastore api: only decode unencrypted indices
+
+  * datastore api: verify blob/index csum from manifest
+
+  * sync, blobs and chunk readers: add more checks and verification
+
+  * verify: add more checks, don't fail on first error
+
+  * mark signed manifests as such
+
+  * backup/prune/forget: improve locking
+
+  * backup: ensure base snapshots are still available after backup
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 11 Aug 2020 15:37:29 +0200
+
+rust-proxmox-backup (0.8.9-1) unstable; urgency=medium
+
+  * improve termprocy (console) behavior on updating proxmox-backup-server and
+    other daemon restarts
+
+  * client: improve upload log output and speed calculation
+
+  * fix #2885: client upload: bail on duplicate backup targets
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 24 Jul 2020 11:24:07 +0200
+
+rust-proxmox-backup (0.8.8-1) unstable; urgency=medium
+
+  * pxar: .pxarexclude: match behavior from absolute paths to the one described
+    in the documentation and use byte based paths
+
+  * catalog shell: add exit command
+
+  * manifest: revert signature canonicalization to old behaviour. Fallout from
+    encrypted older backups is expected and was ignored due to the beta status
+    of Proxmox Backup.
+
+  * documentation: various improvements and additions
+
+  * cached user info: print privilege path in error message
+
+  * docs: fix #2851 Add note about GC grace period
+
+  * api2/status: fix datastore full estimation bug if there where (almost) no
+    change for several days
+
+  * schedules, calendar event: support the 'weekly' special expression
+
+  * ui: sync job: group remote fields and use "Source" in labels
+
+  * ui: add calendar event selector
+
+  * ui: sync job: change default to false for "remove-vanished" for new jobs
+
+  * fix #2860: skip in-progress snapshots when syncing
+
+  * fix #2865: detect and skip vanished snapshots
+
+  * fix #2871: close FDs when scanning backup group, avoid leaking
+
+  * backup: list images: handle walkdir error, catch "lost+found" special
+    directory
+
+  * implement AsyncSeek for AsyncIndexReader
+
+  * client: rework logging upload info like size or bandwidth
+
+  * client writer: do not output chunklist for now on verbose=true
+
+  * add initial API for listing available updates and updating the APT
+    database
+
+  * ui: add xterm.js console implementation
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 23 Jul 2020 12:16:05 +0200
+
+rust-proxmox-backup (0.8.7-2) unstable; urgency=medium
+
+  * support restoring file attributes from pxar archives
+
+  * docs: additions and fixes
+
+  * ui: running tasks: update limit to 100
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 14 Jul 2020 12:05:25 +0200
+
+rust-proxmox-backup (0.8.6-1) unstable; urgency=medium
+
+  * ui: add button for easily showing the server fingerprint dashboard
+
+  * proxmox-backup-client benchmark: add --verbose flag and improve output
+    format
+
+  * docs: reference PDF variant in HTML output
+
+  * proxmox-backup-client: add simple version command
+
+  * improve keyfile and signature handling in catalog and manifest
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 10 Jul 2020 11:34:14 +0200
+
+rust-proxmox-backup (0.8.5-1) unstable; urgency=medium
+
+  * fix cross process task listing
+
+  * docs: expand datastore documentation
+
+  * docs: add remotes and sync-jobs and schedules
+
+  * bump pathpatterns to 0.1.2
+
+  * ui: align version and user-menu spacing with pve/pmg
+
+  * ui: make username a menu-button
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 15:32:39 +0200
+
+rust-proxmox-backup (0.8.4-1) unstable; urgency=medium
+
+  * add TaskButton in header
+
+  * simpler lost+found pattern
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 14:28:24 +0200
+
+rust-proxmox-backup (0.8.3-1) unstable; urgency=medium
+
+  * get_disks: don't fail on zfs_devices
+
+  * allow some more characters for zpool list
+
+  * ui: adapt for new sign-only crypt mode
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 13:55:06 +0200
+
+rust-proxmox-backup (0.8.2-1) unstable; urgency=medium
+
+  * buildsys: also upload debug packages
+
+  * src/backup/manifest.rs: rename into_string -> to_string
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 11:58:51 +0200
+
+rust-proxmox-backup (0.8.1-1) unstable; urgency=medium
+
+  * remove authhenticated data blobs (not needed)
+
+  * add signature to manifest
+
+  * improve docs
+
+  * client: introduce --keyfd parameter
+
+  * ui improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 10:01:25 +0200
+
+rust-proxmox-backup (0.8.0-1) unstable; urgency=medium
+
+  * implement get_runtime_with_builder
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 10:15:26 +0200
+
+rust-proxmox-backup (0.7.0-1) unstable; urgency=medium
+
+  * implement clone for RemoteChunkReader
+
+  * improve docs
+
+  * client: add --encryption boolen parameter
+
+  * client: use default encryption key if it is available
+
+  * d/rules: do not compress .pdf files
+
+  * ui: various fixes
+
+  * add beta text with link to bugtracker
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 07:40:05 +0200
+
+rust-proxmox-backup (0.6.0-1) unstable; urgency=medium
+
+  * make ReadChunk not require mutable self.
+
+  * ui: increase timeout for snapshot listing
+
+  * ui: consistently spell Datastore without space between words
+
+  * ui: disk create: sync and improve 'add-datastore' checkbox label
+
+  * proxmox-backup-client: add benchmark command
+
+  * pxar: fixup 'vanished-file' logic a bit
+
+  * ui: add verify button
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 03 Jul 2020 09:45:52 +0200
+
 rust-proxmox-backup (0.5.0-1) unstable; urgency=medium
 
   * partially revert commit 1f82f9b7b5d231da22a541432d5617cb303c0000
debian/control (vendored, new file) | 132

@@ -0,0 +1,132 @@
+Source: rust-proxmox-backup
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 11),
+ dh-cargo (>= 18),
+ cargo:native,
+ rustc:native,
+ libstd-rust-dev,
+ librust-anyhow-1+default-dev,
+ librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
+ librust-base64-0.12+default-dev,
+ librust-bitflags-1+default-dev (>= 1.2.1-~~),
+ librust-bytes-0.5+default-dev,
+ librust-chrono-0.4+default-dev,
+ librust-crc32fast-1+default-dev,
+ librust-endian-trait-0.6+arrays-dev,
+ librust-endian-trait-0.6+default-dev,
+ librust-futures-0.3+default-dev,
+ librust-h2-0.2+default-dev,
+ librust-h2-0.2+stream-dev,
+ librust-handlebars-3+default-dev,
+ librust-http-0.2+default-dev,
+ librust-hyper-0.13+default-dev,
+ librust-lazy-static-1+default-dev (>= 1.4-~~),
+ librust-libc-0.2+default-dev,
+ librust-log-0.4+default-dev,
+ librust-nix-0.16+default-dev,
+ librust-nom-5+default-dev (>= 5.1-~~),
+ librust-num-traits-0.2+default-dev,
+ librust-once-cell-1+default-dev (>= 1.3.1-~~),
+ librust-openssl-0.10+default-dev,
+ librust-pam-0.7+default-dev,
+ librust-pam-sys-0.5+default-dev,
+ librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
+ librust-percent-encoding-2+default-dev (>= 2.1-~~),
+ librust-pin-utils-0.1+default-dev,
+ librust-proxmox-0.3+api-macro-dev (>= 0.3.3-~~),
+ librust-proxmox-0.3+default-dev (>= 0.3.3-~~),
+ librust-proxmox-0.3+sortable-macro-dev (>= 0.3.3-~~),
+ librust-proxmox-0.3+websocket-dev (>= 0.3.3-~~),
+ librust-proxmox-fuse-0.1+default-dev,
+ librust-pxar-0.6+default-dev,
+ librust-pxar-0.6+futures-io-dev,
+ librust-pxar-0.6+tokio-io-dev,
+ librust-regex-1+default-dev (>= 1.2-~~),
+ librust-rustyline-6+default-dev,
+ librust-serde-1+default-dev,
+ librust-serde-1+derive-dev,
+ librust-serde-json-1+default-dev,
+ librust-siphasher-0.3+default-dev,
+ librust-syslog-4+default-dev,
+ librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+default-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+process-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+time-dev (>= 0.2.9-~~),
+ librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
+ librust-tokio-openssl-0.4+default-dev,
+ librust-tokio-util-0.3+codec-dev,
+ librust-tokio-util-0.3+default-dev,
+ librust-tower-service-0.3+default-dev,
+ librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
+ librust-url-2+default-dev (>= 2.1-~~),
+ librust-walkdir-2+default-dev,
+ librust-xdg-2+default-dev (>= 2.2-~~),
+ librust-zstd-0.4+bindgen-dev,
+ librust-zstd-0.4+default-dev,
+ libacl1-dev,
+ libfuse3-dev,
+ libsystemd-dev,
+ uuid-dev,
+ debhelper (>= 12~),
+ bash-completion,
+ python3-docutils,
+ python3-pygments,
+ rsync,
+ fonts-dejavu-core <!nodoc>,
+ fonts-lato <!nodoc>,
+ fonts-open-sans <!nodoc>,
+ graphviz <!nodoc>,
+ latexmk <!nodoc>,
+ python3-sphinx <!nodoc>,
+ texlive-fonts-extra <!nodoc>,
+ texlive-fonts-recommended <!nodoc>,
+ texlive-xetex <!nodoc>,
+ xindy <!nodoc>
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Standards-Version: 4.4.1
+Vcs-Git:
+Vcs-Browser:
+Homepage: https://www.proxmox.com
+
+Package: proxmox-backup-server
+Architecture: any
+Depends: fonts-font-awesome,
+ libjs-extjs (>= 6.0.1),
+ libzstd1 (>= 1.3.8),
+ lvm2,
+ proxmox-backup-docs,
+ proxmox-mini-journalreader,
+ proxmox-widget-toolkit (>= 2.2-4),
+ pve-xtermjs (>= 4.7.0-1),
+ smartmontools,
+ ${misc:Depends},
+ ${shlibs:Depends},
+Recommends: zfsutils-linux,
+Description: Proxmox Backup Server daemon with tools and GUI
+ This package contains the Proxmox Backup Server daemons and related
+ tools. This includes a web-based graphical user interface.
+
+Package: proxmox-backup-client
+Architecture: any
+Depends: ${misc:Depends}, ${shlibs:Depends}
+Description: Proxmox Backup Client tools
+ This package contains the Proxmox Backup client, which provides a
+ simple command line tool to create and restore backups.
+
+Package: proxmox-backup-docs
+Build-Profiles: <!nodoc>
+Section: doc
+Depends: libjs-extjs,
+ ${misc:Depends},
+Architecture: all
+Description: Proxmox Backup Documentation
+ This package contains the Proxmox Backup Documentation files.
debian/control.in (vendored) | 4

@@ -3,11 +3,15 @@ Architecture: any
 Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libzstd1 (>= 1.3.8),
+         lvm2,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
          proxmox-widget-toolkit (>= 2.2-4),
+         pve-xtermjs (>= 4.7.0-1),
+         smartmontools,
          ${misc:Depends},
          ${shlibs:Depends},
+Recommends: zfsutils-linux,
 Description: Proxmox Backup Server daemon with tools and GUI
  This package contains the Proxmox Backup Server daemons and related
  tools. This includes a web-based graphical user interface.
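Note that debian/control is a generated file: debcargo emits the source stanza and the librust-* Build-Depends from Cargo.toml, and the Makefile's build target splices this hand-maintained control.in onto it before copying the result back (see the sed/cat/cp lines in the Makefile diff above). A quick consistency check, as a sketch assuming debcargo is installed:

    # Regenerate debian/control via the build target; any drift between the
    # regenerated file and the committed copy then shows up in git.
    make build
    git diff debian/control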
debian/lintian-overrides (vendored, new file) | 2

@@ -0,0 +1,2 @@
+proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
+proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
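Both tags are triggered deliberately: the server package ships an apt source for the beta repository, and the banner service intentionally hooks into getty.target. One way to confirm the overrides take effect, as a sketch:

    # lintian reports overridden tags only when asked to show them:
    lintian --show-overrides proxmox-backup-server_*.deb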
debian/postinst (vendored) | 7

@@ -14,6 +14,13 @@ case "$1" in
         _dh_action=start
       fi
       deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+
+    if test -n "$2"; then
+        if dpkg --compare-versions "$2" 'le' '0.8.10-1'; then
+            echo "Fixing up termproxy user id in task log..."
+            flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::root: /:termproxy::root@pam: /' /var/log/proxmox-backup/tasks/active
+        fi
+    fi
     ;;
 
   abort-upgrade|abort-remove|abort-deconfigure)
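The upgrade fixup above is gated on dpkg's version comparison, which signals its result through the exit status (0 when the relation holds). For example:

    # true: 0.8.9-1 <= 0.8.10-1, so upgrades from 0.8.9 run the task-log fixup
    dpkg --compare-versions "0.8.9-1" 'le' '0.8.10-1' && echo "fixup runs"
    # false: 0.8.11-1 is newer than 0.8.10-1, so the fixup is skipped
    dpkg --compare-versions "0.8.11-1" 'le' '0.8.10-1' || echo "fixup skipped"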
debian/proxmox-backup-docs.links (vendored, new file) | 1

@@ -0,0 +1 @@
+/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
debian/proxmox-backup-server.install (vendored) | 1

@@ -1,6 +1,7 @@
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
 etc/proxmox-backup-banner.service /lib/systemd/system/
+etc/pbstest-beta.list /etc/apt/sources.list.d/
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
debian/rules (vendored) | 3

@@ -45,3 +45,6 @@ override_dh_installsystemd:
 # TODO: remove once available (Debian 11 ?)
 override_dh_dwz:
     dh_dwz --no-dwz-multifile
+
+override_dh_compress:
+    dh_compress -X.pdf
docs/Makefile

@@ -1,11 +1,5 @@
 include ../defines.mk
 
-ifeq ($(BUILD_MODE), release)
-COMPILEDIR := ../target/release
-else
-COMPILEDIR := ../target/debug
-endif
-
 GENERATED_SYNOPSIS := \
     proxmox-backup-client/synopsis.rst \
     proxmox-backup-client/catalog-shell-synopsis.rst \
@@ -26,6 +20,15 @@ SPHINXOPTS =
 SPHINXBUILD = sphinx-build
 BUILDDIR = output
 
+ifeq ($(BUILD_MODE), release)
+COMPILEDIR := ../target/release
+SPHINXOPTS += -t release
+else
+COMPILEDIR := ../target/debug
+SPHINXOPTS += -t devbuild
+endif
+
+
 # Sphinx internal variables.
 ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
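The -t release / -t devbuild options define Sphinx tags, which the documentation sources can branch on via ".. only::" directives; the directive usage shown in the comment is illustrative, not part of this diff:

    # Tags are passed on the sphinx-build command line; RST sources can then
    # use e.g. ".. only:: devbuild" to emit development-only content.
    sphinx-build -t devbuild -d output/doctrees . output/html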
@ -1,9 +1,8 @@
|
|||||||
Administration Guide
|
Backup Management
|
||||||
====================
|
=================
|
||||||
|
|
||||||
The administration guide.
|
.. The administration guide.
|
||||||
|
.. todo:: either add a bit more explanation or remove the previous sentence
|
||||||
.. todo:: either add a bit more explanation or remove the previous sentence
|
|
||||||
|
|
||||||
Terminology
|
Terminology
|
||||||
-----------
|
-----------
|
||||||
@ -13,16 +12,16 @@ Backup Content
|
|||||||
|
|
||||||
When doing deduplication, there are different strategies to get
|
When doing deduplication, there are different strategies to get
|
||||||
optimal results in terms of performance and/or deduplication rates.
|
optimal results in terms of performance and/or deduplication rates.
|
||||||
Depending on the type of data, one can split data into *fixed* or *variable*
|
Depending on the type of data, it can be split into *fixed* or *variable*
|
||||||
sized chunks.
|
sized chunks.
|
||||||
|
|
||||||
Fixed sized chunking needs almost no CPU performance, and is used to
|
Fixed sized chunking requires minimal CPU power, and is used to
|
||||||
backup virtual machine images.
|
backup virtual machine images.
|
||||||
|
|
||||||
Variable sized chunking needs more CPU power, but is essential to get
|
Variable sized chunking needs more CPU power, but is essential to get
|
||||||
good deduplication rates for file archives.
|
good deduplication rates for file archives.
|
||||||
|
|
||||||
The backup server supports both strategies.
|
The Proxmox Backup Server supports both strategies.
|
||||||
|
|
||||||
|
|
||||||
File Archives: ``<name>.pxar``
|
File Archives: ``<name>.pxar``
|
||||||
@ -31,7 +30,7 @@ File Archives: ``<name>.pxar``
|
|||||||
.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
|
.. see https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
|
||||||
|
|
||||||
A file archive stores a full directory tree. Content is stored using
|
A file archive stores a full directory tree. Content is stored using
|
||||||
the :ref:`pxar-format`, split into variable sized chunks. The format
|
the :ref:`pxar-format`, split into variable-sized chunks. The format
|
||||||
is optimized to achieve good deduplication rates.
|
is optimized to achieve good deduplication rates.
|
||||||
|
|
||||||
|
|
||||||
@ -39,7 +38,7 @@ Image Archives: ``<name>.img``
|
|||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
This is used for virtual machine images and other large binary
|
This is used for virtual machine images and other large binary
|
||||||
data. Content is split into fixed sized chunks.
|
data. Content is split into fixed-sized chunks.
|
||||||
|
|
||||||
|
|
||||||
Binary Data (BLOBs)
|
Binary Data (BLOBs)
|
||||||
@ -56,7 +55,7 @@ Catalog File: ``catalog.pcat1``
|
|||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
The catalog file is an index for file archives. It contains
|
The catalog file is an index for file archives. It contains
|
||||||
the list of files and is used to speed-up search operations.
|
the list of files and is used to speed up search operations.
|
||||||
|
|
||||||
|
|
||||||
The Manifest: ``index.json``
|
The Manifest: ``index.json``
|
||||||
@ -74,12 +73,12 @@ The backup server groups backups by *type*, where *type* is one of:
|
|||||||
|
|
||||||
``vm``
|
``vm``
|
||||||
This type is used for :term:`virtual machine`\ s. Typically
|
This type is used for :term:`virtual machine`\ s. Typically
|
||||||
contains the virtual machine's configuration and an image archive
|
consists of the virtual machine's configuration file and an image archive
|
||||||
for each disk.
|
for each disk.
|
||||||
|
|
||||||
``ct``
|
``ct``
|
||||||
This type is used for :term:`container`\ s. Contains the container's
|
This type is used for :term:`container`\ s. Consists of the container's
|
||||||
configuration and a single file archive for the container content.
|
configuration and a single file archive for the filesystem content.
|
||||||
|
|
||||||
``host``
|
``host``
|
||||||
This type is used for backups created from within the backed up machine.
|
This type is used for backups created from within the backed up machine.
|
||||||
@ -90,7 +89,7 @@ The backup server groups backups by *type*, where *type* is one of:
|
|||||||
Backup ID
|
Backup ID
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
|
|
||||||
An unique ID. Usually the virtual machine or container ID. ``host``
|
A unique ID. Usually the virtual machine or container ID. ``host``
|
||||||
type backups normally use the hostname.
|
type backups normally use the hostname.
|
||||||
|
|
||||||
|
|
||||||
@ -122,6 +121,13 @@ uniquely identifies a specific backup within a datastore.
|
|||||||
As you can see, the time format is RFC3399_ with Coordinated
|
As you can see, the time format is RFC3399_ with Coordinated
|
||||||
Universal Time (UTC_, identified by the trailing *Z*).
|
Universal Time (UTC_, identified by the trailing *Z*).
|
||||||
|
|
||||||
|
Backup Server Management
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
The command line tool to configure and manage the backup server is called
|
||||||
|
:command:`proxmox-backup-manager`.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
:term:`DataStore`
|
:term:`DataStore`
|
||||||
~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~
|
||||||
@ -133,21 +139,93 @@ or ``zfs``) to store the backup data.
|
|||||||
Datastores are identified by a simple *ID*. You can configure it
|
Datastores are identified by a simple *ID*. You can configure it
|
||||||
when setting up the backup server.
|
when setting up the backup server.
|
||||||
|
|
||||||
|
.. note:: The `File Layout`_ requires the file system to support at least *65538*
|
||||||
|
subdirectories per directory. That number comes from the 2\ :sup:`16`
|
||||||
|
pre-created chunk namespace directories, and the ``.`` and ``..`` default
|
||||||
|
directory entries. This requirement excludes certain filesystems and
|
||||||
|
filesystem configuration from being supported for a datastore. For example,
|
||||||
|
``ext3`` as a whole or ``ext4`` with the ``dir_nlink`` feature manually disabled.
|
||||||
|
|
||||||
Backup Server Management
|
Disk Management
|
||||||
------------------------
|
~~~~~~~~~~~~~~~
|
||||||
|
Proxmox Backup Server comes with a set of disk utilities, which are
|
||||||
|
accessed using the ``disk`` subcommand. This subcommand allows you to initialize
|
||||||
|
disks, create various filesystems, and get information about the disks.
|
||||||
|
|
||||||
The command line tool to configure and manage the backup server is called
|
To view the disks connected to the system, use the ``list`` subcommand of
|
||||||
:command:`proxmox-backup-manager`.
|
``disk``:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager disk list
|
||||||
|
┌──────┬────────┬─────┬───────────┬─────────────┬───────────────┬─────────┬────────┐
|
||||||
|
│ name │ used │ gpt │ disk-type │ size │ model │ wearout │ status │
|
||||||
|
╞══════╪════════╪═════╪═══════════╪═════════════╪═══════════════╪═════════╪════════╡
|
||||||
|
│ sda │ lvm │ 1 │ hdd │ 34359738368 │ QEMU_HARDDISK │ - │ passed │
|
||||||
|
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
|
||||||
|
│ sdb │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
|
||||||
|
├──────┼────────┼─────┼───────────┼─────────────┼───────────────┼─────────┼────────┤
|
||||||
|
│ sdc │ unused │ 1 │ hdd │ 68719476736 │ QEMU_HARDDISK │ - │ passed │
|
||||||
|
└──────┴────────┴─────┴───────────┴─────────────┴───────────────┴─────────┴────────┘
|
||||||
|
|
||||||
|
To initialize a disk with a new GPT, use the ``initialize`` subcommand:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager disk initialize sdX
|
||||||
|
|
||||||
|
You can create an ``ext4`` or ``xfs`` filesystem on a disk, using ``fs
|
||||||
|
create``. The following command creates an ``ext4`` filesystem and passes the
|
||||||
|
``--add-datastore`` parameter, in order to automatically create a datastore on
|
||||||
|
the disk (in this case ``sdd``). This will create a datastore at the location
|
||||||
|
``/mnt/datastore/store1``:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
|
||||||
|
create datastore 'store1' on disk sdd
|
||||||
|
Percentage done: 1
|
||||||
|
...
|
||||||
|
Percentage done: 99
|
||||||
|
TASK OK
|
||||||
|
|
||||||
|
You can also create a ``zpool`` with various raid levels. The command below
|
||||||
|
creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and mounts it
|
||||||
|
on the root directory (default):
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
|
||||||
|
create Mirror zpool 'zpool1' on devices 'sdb,sdc'
|
||||||
|
# "zpool" "create" "-o" "ashift=12" "zpool1" "mirror" "sdb" "sdc"
|
||||||
|
|
||||||
|
TASK OK
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
You can also pass the ``--add-datastore`` parameter here, to automatically
|
||||||
|
create a datastore from the disk.
|
||||||
|
|
||||||
|
You can use ``disk fs list`` and ``disk zpool list`` to keep track of your
|
||||||
|
filesystems and zpools respectively.
|
||||||
|
|
||||||
|
If a disk supports S.M.A.R.T. capability, and you have this enabled, you can
|
||||||
|
display S.M.A.R.T. attributes using the command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager disk smart-attributes sdX
|
||||||
|
|
||||||
Datastore Configuration
|
Datastore Configuration
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
A :term:`datastore` is a place to store backups. You can configure
|
You can configure multiple datastores. Minimum one datastore needs to be
|
||||||
multiple datastores. At least one datastore needs to be
|
configured. The datastore is identified by a simple `name` and points to a
|
||||||
configured. The datastore is identified by a simple `name` and points
|
directory on the filesystem. Each datastore also has associated retention
|
||||||
to a directory.
|
settings of how many backup snapshots for each interval of ``hourly``,
|
||||||
|
``daily``, ``weekly``, ``monthly``, ``yearly`` as well as a time-independent
|
||||||
|
number of backups to keep in that store. :ref:`Pruning <pruning>` and
|
||||||
|
:ref:`garbage collection <garbage-collection>` can also be configured to run
|
||||||
|
periodically based on a configured :term:`schedule` per datastore.
|
||||||
|
|
||||||
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
The following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||||
|
|
||||||
@ -166,6 +244,30 @@ To list existing datastores run:
|
|||||||
│ store1 │ /backup/disk1/store1 │ This is my default storage. │
|
│ store1 │ /backup/disk1/store1 │ This is my default storage. │
|
||||||
└────────┴──────────────────────┴─────────────────────────────┘
|
└────────┴──────────────────────┴─────────────────────────────┘
|
||||||
|
|
||||||
|
You can change settings of a datastore, for example to set a prune and garbage
|
||||||
|
collection schedule or retention settings using ``update`` subcommand and view
|
||||||
|
a datastore with the ``show`` subcommand:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager datastore update store1 --keep-last 7 --prune-schedule daily --gc-schedule 'Tue 04:27'
|
||||||
|
# proxmox-backup-manager datastore show store1
|
||||||
|
┌────────────────┬─────────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞════════════════╪═════════════════════════════╡
|
||||||
|
│ name │ store1 │
|
||||||
|
├────────────────┼─────────────────────────────┤
|
||||||
|
│ path │ /backup/disk1/store1 │
|
||||||
|
├────────────────┼─────────────────────────────┤
|
||||||
|
│ comment │ This is my default storage. │
|
||||||
|
├────────────────┼─────────────────────────────┤
|
||||||
|
│ gc-schedule │ Tue 04:27 │
|
||||||
|
├────────────────┼─────────────────────────────┤
|
||||||
|
│ keep-last │ 7 │
|
||||||
|
├────────────────┼─────────────────────────────┤
|
||||||
|
│ prune-schedule │ daily │
|
||||||
|
└────────────────┴─────────────────────────────┘
|
||||||
|
|
||||||
Finally, it is possible to remove the datastore configuration:
|
Finally, it is possible to remove the datastore configuration:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -179,17 +281,58 @@ Finally, it is possible to remove the datastore configuration:
|
|||||||
File Layout
|
File Layout
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
|
|
||||||
.. todo:: Add datastore file layout example
|
After creating a datastore, the following default layout will appear:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# ls -arilh /backup/disk1/store1
|
||||||
|
276493 -rw-r--r-- 1 backup backup 0 Jul 8 12:35 .lock
|
||||||
|
276490 drwxr-x--- 1 backup backup 1064960 Jul 8 12:35 .chunks
|
||||||
|
|
||||||
|
`.lock` is an empty file used for process locking.
|
||||||
|
|
||||||
|
The `.chunks` directory contains folders, starting from `0000` and taking hexadecimal values until `ffff`. These
|
||||||
|
directories will store the chunked data after a backup operation has been executed.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# ls -arilh /backup/disk1/store1/.chunks
|
||||||
|
545824 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 ffff
|
||||||
|
545823 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffe
|
||||||
|
415621 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffd
|
||||||
|
415620 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffc
|
||||||
|
353187 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffb
|
||||||
|
344995 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fffa
|
||||||
|
144079 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff9
|
||||||
|
144078 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff8
|
||||||
|
144077 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 fff7
|
||||||
|
...
|
||||||
|
403180 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000c
|
||||||
|
403179 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000b
|
||||||
|
403177 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 000a
|
||||||
|
402530 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0009
|
||||||
|
402513 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0008
|
||||||
|
402509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0007
|
||||||
|
276509 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0006
|
||||||
|
276508 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0005
|
||||||
|
276507 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0004
|
||||||
|
276501 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0003
|
||||||
|
276499 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0002
|
||||||
|
276498 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0001
|
||||||
|
276494 drwxr-x--- 2 backup backup 4.0K Jul 8 12:35 0000
|
||||||
|
276489 drwxr-xr-x 3 backup backup 4.0K Jul 8 12:35 ..
|
||||||
|
276490 drwxr-x--- 1 backup backup 1.1M Jul 8 12:35 .
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
User Management
|
User Management
|
||||||
~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox Backup support several authentication realms, and you need to
|
Proxmox Backup Server supports several authentication realms, and you need to
|
||||||
choose the realm when you add a new user. Possible realms are:
|
choose the realm when you add a new user. Possible realms are:
|
||||||
|
|
||||||
:pam: Linux PAM standard authentication. Use this if you want to
|
:pam: Linux PAM standard authentication. Use this if you want to
|
||||||
authenticate as Linux system user (Users needs to exist on the
|
authenticate as Linux system user (Users need to exist on the
|
||||||
system).
|
system).
|
||||||
|
|
||||||
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
|
||||||
@ -216,8 +359,8 @@ normally want to add other users with less privileges:
|
|||||||
|
|
||||||
# proxmox-backup-manager user create john@pbs --email john@example.com
|
# proxmox-backup-manager user create john@pbs --email john@example.com
|
||||||
|
|
||||||
The create command lets you specify many option like ``--email`` or
|
The create command lets you specify many options like ``--email`` or
|
||||||
``--password``, but you can update or change any of them using the
|
``--password``. You can update or change any of them using the
|
||||||
update command later:
|
update command later:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -225,11 +368,10 @@ update command later:
|
|||||||
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
|
# proxmox-backup-manager user update john@pbs --firstname John --lastname Smith
|
||||||
# proxmox-backup-manager user update john@pbs --comment "An example user."
|
# proxmox-backup-manager user update john@pbs --comment "An example user."
|
||||||
|
|
||||||
|
|
||||||
.. todo:: Mention how to set password without passing plaintext password as cli argument.
|
.. todo:: Mention how to set password without passing plaintext password as cli argument.
|
||||||
|
|
||||||
|
|
||||||
The resulting use list looks like this:
|
The resulting user list looks like this:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -242,16 +384,16 @@ The resulting use list looks like this:
|
|||||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||||
|
|
||||||
Newly created users do not have an permissions. Please read the next
|
Newly created users do not have any permissions. Please read the next
|
||||||
section to learn how to set access permissions.
|
section to learn how to set access permissions.
|
||||||
|
|
||||||
If you want to disable an user account, you can do that by setting ``--enable`` to ``0``
|
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager user update john@pbs --enable 0
|
# proxmox-backup-manager user update john@pbs --enable 0
|
||||||
|
|
||||||
Or completely remove the users with:
|
Or completely remove the user with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -261,20 +403,20 @@ Or completely remove the users with:
|
|||||||
Access Control
|
Access Control
|
||||||
~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Users do not have any permission by default. Instead you need to
|
By default new users do not have any permission. Instead you need to
|
||||||
specify what is allowed and what not. You can do this by assigning
|
specify what is allowed and what is not. You can do this by assigning
|
||||||
roles to users on specific objects like datastores or remotes. The
|
roles to users on specific objects like datastores or remotes. The
|
||||||
following roles exist:
|
following roles exist:
|
||||||
|
|
||||||
**Admin**
|
|
||||||
The Administrator can do anything.
|
|
||||||
|
|
||||||
**Audit**
|
|
||||||
An Auditor can view things, but is not allowed to change settings.
|
|
||||||
|
|
||||||
**NoAccess**
|
**NoAccess**
|
||||||
Disable Access - nothing is allowed.
|
Disable Access - nothing is allowed.
|
||||||
|
|
||||||
|
**Admin**
|
||||||
|
Can do anything.
|
||||||
|
|
||||||
|
**Audit**
|
||||||
|
Can view things, but is not allowed to change settings.
|
||||||
|
|
||||||
**DatastoreAdmin**
|
**DatastoreAdmin**
|
||||||
Can do anything on datastores.
|
Can do anything on datastores.
|
||||||
|
|
||||||
@@ -282,10 +424,10 @@ following roles exist:
  Can view datastore settings and list content. But
  is not allowed to read the actual data.

**DatastoreReader**
  Can inspect datastore content and can do restores.

**DatastoreBackup**
  Can backup and restore owned backups.

**DatastorePowerUser**
@@ -300,6 +442,166 @@ following roles exist:
**RemoteSyncOperator**
  Is allowed to read data from a remote.

You can use the ``acl`` subcommand to manage and monitor user permissions. For
example, the command below will add the user ``john@pbs`` as a
**DatastoreAdmin** for the data store ``store1``, located at ``/backup/disk1/store1``:

.. code-block:: console

  # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs

You can monitor the roles of each user using the following command:

.. code-block:: console

  # proxmox-backup-manager acl list
  ┌──────────┬──────────────────┬───────────┬────────────────┐
  │ ugid     │ path             │ propagate │ roleid         │
  ╞══════════╪══════════════════╪═══════════╪════════════════╡
  │ john@pbs │ /datastore/disk1 │         1 │ DatastoreAdmin │
  └──────────┴──────────────────┴───────────┴────────────────┘

A single user can be assigned multiple permission sets for different data stores.

.. Note::
   Naming convention is important here. For data stores on the host,
   you must use the convention ``/datastore/{storename}``. For example, to set
   permissions for a data store mounted at ``/mnt/backup/disk4/store2``, you would use
   ``/datastore/store2`` for the path. For remote stores, use the convention
   ``/remote/{remote}/{storename}``, where ``{remote}`` signifies the name of the
   remote (see `Remote` below) and ``{storename}`` is the name of the data store on
   the remote.
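Following that convention, granting a role on a remote store would look like
the sketch below, assuming a remote named ``pbs2`` with a data store
``store2`` (the command syntax mirrors the ``acl update`` call shown above):

.. code-block:: console

  # proxmox-backup-manager acl update /remote/pbs2/store2 RemoteSyncOperator --userid john@pbs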

Network Management
~~~~~~~~~~~~~~~~~~

Proxmox Backup Server provides an interface for network configuration through
the ``network`` subcommand. This allows you to carry out some basic network
management tasks, such as adding, configuring and removing network interfaces.

To get a list of available interfaces, use the following command:

.. code-block:: console

  # proxmox-backup-manager network list
  ┌───────┬────────┬───────────┬────────┬─────────┬───────────────────┬──────────────┬──────────────┐
  │ name  │ type   │ autostart │ method │ method6 │ address           │ gateway      │ ports/slaves │
  ╞═══════╪════════╪═══════════╪════════╪═════════╪═══════════════════╪══════════════╪══════════════╡
  │ bond0 │ bond   │         1 │ manual │         │                   │              │ ens18 ens19  │
  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
  │ ens18 │ eth    │         1 │ manual │         │                   │              │              │
  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
  │ ens19 │ eth    │         1 │ manual │         │                   │              │              │
  ├───────┼────────┼───────────┼────────┼─────────┼───────────────────┼──────────────┼──────────────┤
  │ vmbr0 │ bridge │         1 │ static │         │ x.x.x.x/x         │ x.x.x.x      │ bond0        │
  └───────┴────────┴───────────┴────────┴─────────┴───────────────────┴──────────────┴──────────────┘

To add a new network interface, use the ``create`` subcommand with the relevant
parameters. The following command shows a template for creating a new bridge:

.. code-block:: console

  # proxmox-backup-manager network create vmbr1 --autostart true --cidr x.x.x.x/x --gateway x.x.x.x --bridge_ports iface_name --type bridge

You can make changes to the configuration of a network interface with the
``update`` subcommand:

.. code-block:: console

  # proxmox-backup-manager network update vmbr1 --cidr y.y.y.y/y

You can also remove a network interface:

.. code-block:: console

  # proxmox-backup-manager network remove vmbr1

To view the changes made to the network configuration file, before committing
them, use the command:

.. code-block:: console

  # proxmox-backup-manager network changes

If you would like to cancel all changes at this point, you can do this using:

.. code-block:: console

  # proxmox-backup-manager network revert

If you are happy with the changes and would like to write them into the
configuration file, the command is:

.. code-block:: console

  # proxmox-backup-manager network reload

You can also configure DNS settings using the ``dns`` subcommand of
``proxmox-backup-manager``.
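As a rough sketch of inspecting and updating the DNS configuration; the exact
option names (``--dns1``, ``--search``) are an assumption based on the other
Proxmox products and may differ:

.. code-block:: console

  # proxmox-backup-manager dns get
  # proxmox-backup-manager dns update --dns1 x.x.x.x --search mydomain.example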

:term:`Remote`
~~~~~~~~~~~~~~

A remote refers to a separate Proxmox Backup Server installation and a user on that
installation, from which you can `sync` datastores to a local datastore with a
`Sync Job`.

To add a remote, you need its hostname or IP address, a userid and password on
the remote, and its certificate fingerprint. To get the fingerprint, use the
``proxmox-backup-manager cert info`` command on the remote.

.. code-block:: console

  # proxmox-backup-manager cert info | grep Fingerprint
  Fingerprint (sha256): 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

Using the information specified above, add the remote with:

.. code-block:: console

  # proxmox-backup-manager remote create pbs2 --host pbs2.mydomain.example --userid sync@pam --password 'SECRET' --fingerprint 64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe

Use the ``list``, ``show``, ``update``, ``remove`` subcommands of
``proxmox-backup-manager remote`` to manage your remotes:

.. code-block:: console

  # proxmox-backup-manager remote update pbs2 --host pbs2.example
  # proxmox-backup-manager remote list
  ┌──────┬──────────────┬──────────┬───────────────────────────────────────────┬─────────┐
  │ name │ host         │ userid   │ fingerprint                               │ comment │
  ╞══════╪══════════════╪══════════╪═══════════════════════════════════════════╪═════════╡
  │ pbs2 │ pbs2.example │ sync@pam │64:d3:ff:3a:50:38:53:5a:9b:f7:50:...:ab:fe │         │
  └──────┴──────────────┴──────────┴───────────────────────────────────────────┴─────────┘
  # proxmox-backup-manager remote remove pbs2


Sync Jobs
~~~~~~~~~

Sync jobs are configured to pull the contents of a datastore on a `Remote` to a
local datastore. You can either start the sync job manually on the GUI or
provide it with a :term:`schedule` to run regularly. The
``proxmox-backup-manager sync-job`` command is used to manage sync jobs:

.. code-block:: console

  # proxmox-backup-manager sync-job create pbs2-local --remote pbs2 --remote-store local --store local --schedule 'Wed 02:30'
  # proxmox-backup-manager sync-job update pbs2-local --comment 'offsite'
  # proxmox-backup-manager sync-job list
  ┌────────────┬───────┬────────┬──────────────┬───────────┬─────────┐
  │ id         │ store │ remote │ remote-store │ schedule  │ comment │
  ╞════════════╪═══════╪════════╪══════════════╪═══════════╪═════════╡
  │ pbs2-local │ local │ pbs2   │ local        │ Wed 02:30 │ offsite │
  └────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
  # proxmox-backup-manager sync-job remove pbs2-local


Garbage Collection
~~~~~~~~~~~~~~~~~~

You can monitor and run :ref:`garbage collection <garbage-collection>` on the
Proxmox Backup Server using the ``garbage-collection`` subcommand of
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
collection on an entire data store and the ``status`` subcommand to see
attributes relating to the :ref:`garbage collection <garbage-collection>`.
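As a brief illustration (assuming a datastore named ``store1``; the output is
not shown here), that could look like:

.. code-block:: console

  # proxmox-backup-manager garbage-collection start store1
  # proxmox-backup-manager garbage-collection status store1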

Backup Client usage
@@ -308,16 +610,16 @@ Backup Client usage
The command line client is called :command:`proxmox-backup-client`.


Repository Locations
~~~~~~~~~~~~~~~~~~~~

The client uses the following notation to specify a datastore repository
on the backup server.

  [[username@]server:]datastore

The default value for ``username`` is ``root``. If no server is specified,
the default is the local host (``localhost``).

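To make the notation concrete, these are the forms it allows, using the
made-up host ``backup-server`` and datastore ``store1``:

.. code-block:: console

  store1                         # local host, default user root
  backup-server:store1           # remote server, default user root
  john@pbs@backup-server:store1  # remote server, explicit userid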
You can pass the repository with the ``--repository`` command
line option, or by setting the ``PBS_REPOSITORY`` environment
@@ -381,7 +683,7 @@ This section explains how to create a backup from within the machine. This can
be a physical host, a virtual machine, or a container. Such backups may contain file
and image archives. There are no restrictions in this case.

.. note:: If you want to backup virtual machines or containers on Proxmox VE, see :ref:`pve-integration`.

For the following example you need to have a backup server set up, working
credentials and need to know the repository name.
@@ -412,11 +714,13 @@ This will prompt you for a password and then uploads a file archive named

The ``--repository`` option can get quite long and is used by all
commands. You can avoid having to enter this value by setting the
environment variable ``PBS_REPOSITORY``. Note that if you would like this to remain set
over multiple sessions, you should instead add the below line to your
``.bashrc`` file.

.. code-block:: console

  # export PBS_REPOSITORY=backup-server:store1

After this you can execute all commands without specifying the ``--repository``
option.
@@ -447,7 +751,7 @@ Excluding files/folders from a backup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Sometimes it is desired to exclude certain files or folders from a backup archive.
To tell the Proxmox Backup client when and how to ignore files and directories,
place a text file called ``.pxarexclude`` in the filesystem hierarchy.
Whenever the backup client encounters such a file in a directory, it interprets
each line as glob match patterns for files and directories that are to be excluded
@@ -469,17 +773,17 @@ the given patterns. It is only possible to match files in this directory and its
all files ending in ``.tmp`` within the directory or subdirectories with the
following pattern ``**/*.tmp``.
``[...]`` matches a single character from any of the provided characters within
the brackets. ``[!...]`` does the complementary and matches any single character
not contained within the brackets. It is also possible to specify ranges with two
characters separated by ``-``. For example, ``[a-z]`` matches any lowercase
alphabetic character and ``[0-9]`` matches any one single digit.

The order of the glob match patterns defines whether a file is included or
excluded, that is to say, later entries override previous ones.
This is also true for match patterns encountered deeper down the directory tree,
which can override a previous exclusion.
Be aware that excluded directories will **not** be read by the backup client.
Thus, a ``.pxarexclude`` file in an excluded subdirectory will have no effect.
``.pxarexclude`` files are treated as regular files and will be included in the
backup archive.

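As a small worked example, a hand-written ``.pxarexclude`` could contain the
following three patterns (the names are made up): the first excludes all
``.tmp`` files in this directory and below, the second a single subdirectory,
and the third any ``.bak`` file in this directory whose name starts with a
digit.

.. code-block:: console

  **/*.tmp
  mydir/cache
  [0-9]*.bak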
@@ -529,10 +833,10 @@ Restoring this backup will result in:
. .. file2

Encryption
~~~~~~~~~~

Proxmox Backup supports client-side encryption with AES-256 in GCM_
mode. To set this up, you first need to create an encryption key:

.. code-block:: console

@@ -546,6 +850,8 @@ extra protection, you can also create it without a password:

  # proxmox-backup-client key create /path/to/my-backup.key --kdf none

Having created this key, it is now possible to create an encrypted backup, by
passing the ``--keyfile`` parameter, with the path to the key file.

.. code-block:: console

@@ -554,23 +860,108 @@ extra protection, you can also create it without a password:
  Encryption Key Password: **************
  ...

.. Note:: If you do not specify the name of the backup key, the key will be
   created in the default location
   ``~/.config/proxmox-backup/encryption-key.json``. ``proxmox-backup-client``
   will also search this location by default, in case the ``--keyfile``
   parameter is not specified.

You can avoid entering the passwords by setting the environment
variables ``PBS_PASSWORD`` and ``PBS_ENCRYPTION_PASSWORD``.
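For example (both values are placeholders; consider the security implications
of keeping plaintext passwords in your shell environment or history):

.. code-block:: console

  # export PBS_PASSWORD='your-server-password'
  # export PBS_ENCRYPTION_PASSWORD='your-key-passphrase'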

Using a master key to store and recover encryption keys
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can also use ``proxmox-backup-client key`` to create an RSA public/private
key pair, which can be used to store an encrypted version of the symmetric
backup encryption key alongside each backup and recover it later.

To set up a master key:

1. Create an encryption key for the backup:

   .. code-block:: console

     # proxmox-backup-client key create
     creating default key at: "~/.config/proxmox-backup/encryption-key.json"
     Encryption Key Password: **********
     ...

   The resulting file will be saved to ``~/.config/proxmox-backup/encryption-key.json``.

2. Create an RSA public/private key pair:

   .. code-block:: console

     # proxmox-backup-client key create-master-key
     Master Key Password: *********
     ...

   This will create two files in your current directory, ``master-public.pem``
   and ``master-private.pem``.

3. Import the newly created ``master-public.pem`` public certificate, so that
   ``proxmox-backup-client`` can find and use it upon backup.

   .. code-block:: console

     # proxmox-backup-client key import-master-pubkey /path/to/master-public.pem
     Imported public master key to "~/.config/proxmox-backup/master-public.pem"

4. With all these files in place, run a backup job:

   .. code-block:: console

     # proxmox-backup-client backup etc.pxar:/etc

   The key will be stored in your backup, under the name ``rsa-encrypted.key``.

   .. Note:: The ``--keyfile`` parameter can be excluded, if the encryption key
      is in the default path. If you specified another path upon creation, you
      must pass the ``--keyfile`` parameter.

5. To test that everything worked, you can restore the key from the backup:

   .. code-block:: console

     # proxmox-backup-client restore /path/to/backup/ rsa-encrypted.key /path/to/target

   .. Note:: You should not need an encryption key to extract this file. However, if
      a key exists at the default location
      (``~/.config/proxmox-backup/encryption-key.json``) the program will prompt
      you for an encryption key password. Simply moving ``encryption-key.json``
      out of this directory will fix this issue.

6. Then, use the previously generated master key to decrypt the file:

   .. code-block:: console

     # openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
     Enter pass phrase for ./master-private.pem: *********

7. The target file will now contain the encryption key information in plain
   text. The success of this can be confirmed by passing the resulting ``json``
   file, with the ``--keyfile`` parameter, when decrypting files from the backup.
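As a sketch of that final check (the snapshot name and paths are
hypothetical), a restore that decrypts using the recovered key file could
look like:

.. code-block:: console

  # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z etc.pxar /restore/target --keyfile /path/to/target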

.. warning:: Without their key, backed up files will be inaccessible. Thus, you should
   keep keys ordered and in a place that is separate from the contents being
   backed up. It can happen, for example, that you back up an entire system, using
   a key on that system. If the system then becomes inaccessible for any reason
   and needs to be restored, this will not be possible, as the encryption key will be
   lost along with the broken system. In preparation for the worst case scenario,
   you should consider keeping a paper copy of this key locked away in
   a safe place.

Restoring Data
~~~~~~~~~~~~~~

The regular creation of backups is a necessary step in avoiding data
loss. More important, however, is the restoration. It is good practice to perform
periodic recovery tests to ensure that you can access the data in
case of problems.

First, you need to find the snapshot which you want to restore. The snapshot
command provides a list of all the snapshots on the server:

.. code-block:: console

@@ -602,8 +993,8 @@ backup.

  # proxmox-backup-client restore host/elsa/2019-12-03T09:35:01Z root.pxar /target/path/

To get the contents of any archive, you can restore the ``index.json`` file in the
repository to the target path '-'. This will dump the contents to the standard output.

.. code-block:: console

@@ -640,13 +1031,13 @@ working directory and list directory contents in the archive.
``pwd`` shows the full path of the current working directory with respect to the
archive root.

Being able to quickly search the contents of the archive is a commonly needed feature.
That's where the catalog is most valuable.
For example:

.. code-block:: console

  pxar:/ > find etc/**/*.txt --select
  "/etc/X11/rgb.txt"
  pxar:/ > list-selected
  etc/**/*.txt
@@ -684,15 +1075,15 @@ file archive as a read-only filesystem to a mountpoint on your host.

.. code-block:: console

  # proxmox-backup-client mount host/backup-client/2020-01-29T11:29:22Z root.pxar /mnt/mountpoint
  # ls /mnt/mountpoint
  bin dev home lib32 libx32 media opt root sbin sys usr
  boot etc lib lib64 lost+found mnt proc run srv tmp var

This allows you to access the full contents of the archive in a seamless manner.

.. note:: As the FUSE connection needs to fetch and decrypt chunks from the
   backup server's datastore, this can cause some additional network and CPU
   load on your host, depending on the operations you perform on the mounted
   filesystem.

@@ -700,7 +1091,7 @@ To unmount the filesystem use the ``umount`` command on the mountpoint:

.. code-block:: console

  # umount /mnt/mountpoint

Login and Logout
~~~~~~~~~~~~~~~~
@@ -726,6 +1117,8 @@ To remove the ticket, issue a logout:
  # proxmox-backup-client logout


.. _pruning:

Pruning and Removing Backups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -741,8 +1134,8 @@ command:
snapshot. They will be inaccessible and unrecoverable.


Although manual removal is sometimes required, the ``prune``
command is normally used to systematically delete older backups. Prune lets
you specify which backup snapshots you want to keep. The
following retention options are available:

@@ -787,7 +1180,7 @@ backup is retained.

You can use the ``--dry-run`` option to test your settings. This only
shows the list of existing snapshots and what actions prune would take.

.. code-block:: console

@@ -829,6 +1222,17 @@ unused data blocks are removed.
depending on the number of chunks and the speed of the underlying
disks.

.. note:: The garbage collection will only remove chunks that haven't been used
   for at least one day (exactly 24h 5m). This grace period is necessary because
   chunks in use are marked by touching the chunk, which updates the ``atime``
   (access time) property. Filesystems are mounted with the ``relatime`` option
   by default. This results in better performance, by only updating the
   ``atime`` property if the last access was at least 24 hours ago. The
   downside is that touching a chunk within these 24 hours will not always
   update its ``atime`` property.

   Chunks in the grace period will be logged at the end of the garbage
   collection task as *Pending removals*.

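If you want to observe this for yourself, the ``atime`` of a chunk file can be
inspected directly; the datastore path and chunk digest below are
placeholders:

.. code-block:: console

  # stat --format='%x' /path/to/datastore/.chunks/0000/<chunk-digest>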
.. code-block:: console

@@ -851,6 +1255,42 @@ unused data blocks are removed.

.. todo:: howto run garbage-collection at regular intervals (cron)

Benchmarking
~~~~~~~~~~~~

The backup client also comes with a benchmarking tool. This tool measures
various metrics relating to compression and encryption speeds. You can run a
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:

.. code-block:: console

  # proxmox-backup-client benchmark
  Uploaded 656 chunks in 5 seconds.
  Time per request: 7659 microseconds.
  TLS speed: 547.60 MB/s
  SHA256 speed: 585.76 MB/s
  Compression speed: 1923.96 MB/s
  Decompress speed: 7885.24 MB/s
  AES256/GCM speed: 3974.03 MB/s
  ┌───────────────────────────────────┬─────────────────────┐
  │ Name                              │ Value               │
  ╞═══════════════════════════════════╪═════════════════════╡
  │ TLS (maximal backup upload speed) │ 547.60 MB/s (93%)   │
  ├───────────────────────────────────┼─────────────────────┤
  │ SHA256 checksum computation speed │ 585.76 MB/s (28%)   │
  ├───────────────────────────────────┼─────────────────────┤
  │ ZStd level 1 compression speed    │ 1923.96 MB/s (89%)  │
  ├───────────────────────────────────┼─────────────────────┤
  │ ZStd level 1 decompression speed  │ 7885.24 MB/s (98%)  │
  ├───────────────────────────────────┼─────────────────────┤
  │ AES256 GCM encryption speed       │ 3974.03 MB/s (104%) │
  └───────────────────────────────────┴─────────────────────┘

.. note:: The percentages given in the output table correspond to a
   comparison against a Ryzen 7 2700X. The TLS test connects to the
   local host, so there is no network involved.

You can also pass the ``--output-format`` parameter to output stats in ``json``,
rather than the default table format.

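For example, to capture the benchmark results for further processing:

.. code-block:: console

  # proxmox-backup-client benchmark --output-format json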
.. _pve-integration:

@@ -896,7 +1336,3 @@ After that you should be able to see storage status with:
.. include:: command-line-tools.rst

.. include:: services.rst


docs/conf.py
@@ -17,7 +17,7 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

@@ -45,8 +45,11 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.

extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]

todo_link_only = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

@@ -76,9 +79,11 @@ author = 'Proxmox Support Team'
# built documents.
#
# The short X.Y version.
vstr = lambda s: '<devbuild>' if s is None else str(s)

version = vstr(os.getenv('DEB_VERSION_UPSTREAM'))
# The full version, including alpha/beta/rc tags.
release = vstr(os.getenv('DEB_VERSION'))

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -107,7 +112,7 @@ exclude_patterns = [
    'pxar/man1.rst',
    'epilog.rst',
    'pbs-copyright.rst',
    'local-zfs.rst',
    'package-repositories.rst',
]


docs/epilog.rst

@@ -11,8 +11,11 @@
.. _Container: https://en.wikipedia.org/wiki/Container_(virtualization)
.. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
// FIXME
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Rust: https://www.rust-lang.org/
.. _SHA-256: https://en.wikipedia.org/wiki/SHA-2

docs/glossary.rst

@@ -16,7 +16,7 @@ Glossary
Datastore

  A place to store backups. A directory which contains the backup data.
  The current implementation is file-system based.

`Rust`_

@@ -46,3 +46,19 @@ Glossary
  kernel driver handles filesystem requests and sends them to a
  userspace application.

Remote

  A remote Proxmox Backup Server installation and credentials for a user on it.
  You can pull datastores from a remote to a local datastore in order to
  have redundant backups.

Schedule

  Certain tasks, for example pruning and garbage collection, need to be
  performed on a regular basis. Proxmox Backup Server uses a subset of the
  `systemd Time and Date Specification
  <https://www.freedesktop.org/software/systemd/man/systemd.time.html#>`_.
  The subset currently supports time of day specifications and weekdays, in
  addition to the shorthand expressions 'minutely', 'hourly', 'daily'.
  There is no support for specifying timezones; the tasks are run in the
  timezone configured on the server.
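As an illustration of such schedule values, reusing the ``sync-job`` command
from the administration guide above (the job name ``pbs2-local`` is taken from
that example):

.. code-block:: console

  # proxmox-backup-manager sync-job update pbs2-local --schedule 'daily'
  # proxmox-backup-manager sync-job update pbs2-local --schedule 'Wed 02:30'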

docs/index.rst

@@ -1,19 +1,20 @@
.. Proxmox Backup documentation master file

Welcome to the Proxmox Backup documentation!
============================================

Copyright (C) 2019-2020 Proxmox Server Solutions GmbH

Permission is granted to copy, distribute and/or modify this document under the
terms of the GNU Free Documentation License, Version 1.3 or any later version
published by the Free Software Foundation; with no Invariant Sections, no
Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
in the section entitled "GNU Free Documentation License".


.. only:: html

   A `PDF` version of the documentation is `also available here <./proxmox-backup.pdf>`_

.. toctree::
   :maxdepth: 3
@@ -22,6 +23,7 @@ Documentation License".
   introduction.rst
   installation.rst
   administration-guide.rst
   sysadmin.rst

.. raw:: latex

@@ -37,5 +39,14 @@ Documentation License".
   glossary.rst
   GFDL.rst

.. only:: html and devbuild

   .. toctree::
      :maxdepth: 2
      :caption: Developer Appendix

      todos.rst


* :ref:`genindex`

docs/installation.rst

@@ -19,9 +19,9 @@ for various management tasks such as disk management.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.

The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configurations
(e.g. timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.

@@ -36,11 +36,11 @@ It includes the following:

* The `Proxmox Backup`_ server installer, which partitions the local
  disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
  system

* Complete operating system (Debian Linux, 64-bit)

* Our Linux kernel with ZFS support

* Complete tool-set to administer backups and all necessary resources

@@ -54,7 +54,7 @@ Install `Proxmox Backup`_ server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxmox ships as a set of Debian packages which can be installed on top of a
standard Debian installation. After configuring the
:ref:`sysadmin_package_repositories`, you need to run:

.. code-block:: console
@@ -76,12 +76,15 @@ does, please use the following:
This will install all required packages, the Proxmox kernel with ZFS_
support, and a set of common and useful packages.

.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
   installation looks easy, but it assumes that the base system and local
   storage have been set up correctly. In general this is not trivial, especially
   when LVM_ or ZFS_ is used. The network configuration is completely up to you
   as well.

.. note:: You can access the web interface of the Proxmox Backup Server with
   your web browser, using HTTPS on port 8007. For example at
   ``https://<ip-or-dns-name>:8007``

Install Proxmox Backup server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -99,6 +102,10 @@ After configuring the
server to store backups. Should the hypervisor server fail, you can
still access the backups.

.. note::
   You can access the web interface of the Proxmox Backup Server with your web
   browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``

Client installation
-------------------

docs/introduction.rst

@@ -1,120 +1,170 @@
Introduction
============

What is Proxmox Backup Server
-----------------------------

Proxmox Backup Server is an enterprise-class, client-server backup software
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
platform and allows you to back up your data securely, even between remote
sites, providing easy management with a web-based user interface.

Proxmox Backup Server supports deduplication, compression, and authenticated
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
performance, low resource usage, and a safe, high-quality codebase.

It features strong client-side encryption. Thus, it's possible to
back up data to targets that are not fully trusted.


Architecture
------------

Proxmox Backup Server uses a `client-server model`_. The server stores the
backup data and provides an API to create and manage data stores. With the
API, it's also possible to manage disks and other server-side resources.

The backup client uses this API to access the backed up data. With the command
line tool ``proxmox-backup-client`` you can create backups and restore data.
For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.

A single backup is allowed to contain several archives. For example, when you
backup a :term:`virtual machine`, each disk is stored as a separate archive
inside that backup. The VM configuration itself is stored as an extra file.
This way, it's easy to access and restore only important parts of the backup,
without the need to scan the whole backup.


Main Features
-------------

:Support for Proxmox VE: The `Proxmox Virtual Environment`_ is fully
  supported and you can easily backup :term:`virtual machine`\ s and
  :term:`container`\ s.

:Performance: The whole software stack is written in :term:`Rust`,
  in order to provide high speed and memory efficiency.

:Deduplication: Periodic backups produce large amounts of duplicate
  data. The deduplication layer avoids redundancy and minimizes the storage
  space used.

:Incremental backups: Changes between backups are typically low. Reading and
  sending only the delta reduces the storage and network impact of backups.

:Data Integrity: The built-in `SHA-256`_ checksum algorithm ensures accuracy and
  consistency in your backups.

:Remote Sync: It is possible to efficiently synchronize data to remote
  sites. Only deltas containing new data are transferred.

:Compression: The ultra-fast Zstandard_ compression is able to compress
  several gigabytes of data per second.

:Encryption: Backups can be encrypted on the client-side, using AES-256 in
  Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
  provides very high performance on modern hardware.

:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
  user interface.

:Open Source: No secrets. Proxmox Backup Server is free and open-source
  software. The source code is licensed under AGPL, v3.

:Support: Enterprise support will be available from `Proxmox`_ once the beta
  phase is over.


Reasons for Data Backup?
------------------------

The main purpose of a backup is to protect against data loss. Data loss can be
caused by both faulty hardware and human error.

A common mistake is to accidentally delete a file or folder which is still
required. Virtualization can even amplify this problem, as deleting a whole
virtual machine can be as easy as pressing a single button.

For administrators, backups can serve as a useful toolkit for temporarily
storing data. For example, it is common practice to perform full backups before
installing major software updates. If something goes wrong, you can easily
restore the previous state.

Another reason for backups is legal requirements. Some data, especially
business records, must be kept in a safe place for several years by law, so
that they can be accessed if required.

In general, data loss is very costly as it can severely damage your business.
Therefore, ensure that you perform regular backups and run restore tests.

||||||
Software Stack
|
Software Stack
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
.. todo:: Eplain why we use Rust (and Flutter)
|
Proxmox Backup Server consists of multiple components:
|
||||||
|
|
||||||
|
* A server-daemon providing, among other things, a RESTfull API, super-fast
|
||||||
|
asynchronous tasks, lightweight usage statistic collection, scheduling
|
||||||
|
events, strict separation of privileged and unprivileged execution
|
||||||
|
environments
|
||||||
|
* A JavaScript management web interface
|
||||||
|
* A management CLI tool for the server (`proxmox-backup-manager`)
|
||||||
|
* A client CLI tool (`proxmox-backup-client`) to access the server easily from
|
||||||
|
any `Linux amd64` environment
|
||||||
|
|
||||||
|
Aside from the web interface, everything is written in the Rust programming
|
||||||
|
language.
|
||||||
|
|
||||||
|
"The Rust programming language helps you write faster, more reliable software.
|
||||||
|
High-level ergonomics and low-level control are often at odds in programming
|
||||||
|
language design; Rust challenges that conflict. Through balancing powerful
|
||||||
|
technical capacity and a great developer experience, Rust gives you the option
|
||||||
|
to control low-level details (such as memory usage) without all the hassle
|
||||||
|
traditionally associated with such control."
|
||||||
|
|
||||||
|
-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
|
||||||
|
|
||||||
|
.. todo:: further explain the software stack
|
||||||
|
|
||||||
|
Getting Help
|
||||||
|
------------
|
||||||
|
|
||||||
|
Community Support Forum
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
We always encourage our users to discuss and share their knowledge using the
|
||||||
|
`Proxmox Community Forum`_. The forum is moderated by the Proxmox support team.
|
||||||
|
The large user base is spread out all over the world. Needless to say that such
|
||||||
|
a large forum is a great place to get information.
|
||||||
|
|
||||||
|
Mailing Lists
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Proxmox Backup Server is fully open-source and contributions are welcome! Here
|
||||||
|
is the primary communication channel for developers:
|
||||||
|
|
||||||
|
:Mailing list for developers: `PBS Development List`_
|
||||||
|
|
||||||
|
Bug Tracker
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
|
||||||
|
issue appears, file your report there. An issue can be a bug as well as a
|
||||||
|
request for a new feature or enhancement. The bug tracker helps to keep track
|
||||||
|
of the issue and will send a notification once it has been solved.
|
||||||
|
|
||||||
License
|
License
|
||||||
-------
|
-------
|
||||||
|
|
||||||
Copyright (C) 2019 Proxmox Server Solutions GmbH
|
Copyright (C) 2019-2020 Proxmox Server Solutions GmbH
|
||||||
|
|
||||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||||
|
|
||||||
Proxmox Backup is free software: you can redistribute it and/or modify
|
Proxmox Backup Server is free and open source software: you can use it,
|
||||||
it under the terms of the GNU Affero General Public License as
|
redistribute it, and/or modify it under the terms of the GNU Affero General
|
||||||
published by the Free Software Foundation, either version 3 of the
|
Public License as published by the Free Software Foundation, either version 3
|
||||||
License, or (at your option) any later version.
|
of the License, or (at your option) any later version.
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful, but
|
This program is distributed in the hope that it will be useful, but
|
||||||
``WITHOUT ANY WARRANTY``; without even the implied warranty of
|
``WITHOUT ANY WARRANTY``; without even the implied warranty of
|
||||||
|
400
docs/local-zfs.rst
Normal file
400
docs/local-zfs.rst
Normal file
@ -0,0 +1,400 @@
|
|||||||
|
ZFS on Linux
------------

ZFS is a combined file system and logical volume manager designed by
Sun Microsystems. There is no need to manually compile ZFS modules - all
packages are included.

By using ZFS, it's possible to achieve maximum enterprise features with
low budget hardware, but also high performance systems by leveraging
SSD caching or even SSD only setups. ZFS can replace expensive hardware
RAID cards with moderate CPU and memory load, combined with easy
management.

General ZFS advantages

* Easy configuration and management with GUI and CLI.
* Reliable
* Protection against data corruption
* Data compression on file system level
* Snapshots
* Copy-on-write clone
* Various raid levels: RAID0, RAID1, RAID10, RAIDZ-1, RAIDZ-2 and RAIDZ-3
* Can use SSD for cache
* Self healing
* Continuous integrity checking
* Designed for high storage capacities
* Asynchronous replication over network
* Open Source
* Encryption

Hardware
~~~~~~~~

ZFS depends heavily on memory, so you need at least 8GB to start. In
practice, use as much as you can get for your hardware/budget. To prevent
data corruption, we recommend the use of high quality ECC RAM.

If you use a dedicated cache and/or log disk, you should use an
enterprise class SSD (e.g. Intel SSD DC S3700 Series). This can
increase the overall performance significantly.

IMPORTANT: Do not use ZFS on top of a hardware controller which has its
own cache management. ZFS needs to communicate directly with the disks. An
HBA adapter, or something like an LSI controller flashed in ``IT`` mode,
is the way to go.

ZFS Administration
~~~~~~~~~~~~~~~~~~

This section gives you some usage examples for common tasks. ZFS
itself is really powerful and provides many options. The main commands
to manage ZFS are `zfs` and `zpool`. Both commands come with great
manual pages, which can be read with:

.. code-block:: console

  # man zpool
  # man zfs

Create a new zpool
^^^^^^^^^^^^^^^^^^

To create a new pool, at least one disk is needed. The `ashift` value should
match the sector size of the underlying disk (the sector size is 2 to the
power of `ashift`), or be larger.

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device>
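
If you are unsure which value to pick, you can query the sector sizes of the
disk first. A minimal sketch (the device path is a placeholder):

.. code-block:: console

  # lsblk -o NAME,PHY-SEC,LOG-SEC /dev/sdX

A physical sector size of 512 bytes corresponds to `ashift=9` (2^9 = 512),
and 4096 bytes to `ashift=12` (2^12 = 4096).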

Create a new pool with RAID-0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 1 disk

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device1> <device2>

Create a new pool with RAID-1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 2 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2>

Create a new pool with RAID-10
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 4 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> mirror <device3> <device4>

Create a new pool with RAIDZ-1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 3 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> raidz1 <device1> <device2> <device3>

Create a new pool with RAIDZ-2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 4 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> raidz2 <device1> <device2> <device3> <device4>

Create a new pool with cache (L2ARC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to use a dedicated cache drive partition to increase
the performance (use SSDs).

As `<device>`, it is possible to use more devices, as shown in
"Create a new pool with RAID*".

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> cache <cache_device>

Create a new pool with log (ZIL)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to use a dedicated drive partition as log device to increase
the performance (use SSDs).

As `<device>`, it is possible to use more devices, as shown in
"Create a new pool with RAID*".

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> log <log_device>

Add cache and log to an existing pool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you have a pool without cache and log, first partition the SSD into
two partitions with `parted` or `gdisk`.

.. important:: Always use GPT partition tables.

The maximum size of a log device should be about half the size of
physical memory, so this is usually quite small. The rest of the SSD
can be used as cache.

.. code-block:: console

  # zpool add -f <pool> log <device-part1> cache <device-part2>
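
The partitioning step itself could, for example, be done with `sgdisk`. A
minimal sketch, assuming a 16G log partition and a placeholder device path:

.. code-block:: console

  # sgdisk -n 1:0:+16G /dev/sdX   # first partition, used as log device
  # sgdisk -n 2:0:0 /dev/sdX      # second partition, remaining space as cache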

Changing a failed device
^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: console

  # zpool replace -f <pool> <old device> <new device>


Changing a failed bootable device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Depending on how Proxmox Backup was installed, it uses either `grub` or
`systemd-boot` as the bootloader.

The first steps of copying the partition table, reissuing GUIDs and replacing
the ZFS partition are the same. To make the system bootable from the new disk,
different steps are needed, depending on the bootloader in use.

.. code-block:: console

  # sgdisk <healthy bootable device> -R <new device>
  # sgdisk -G <new device>
  # zpool replace -f <pool> <old zfs partition> <new zfs partition>

.. NOTE:: Use the `zpool status -v` command to monitor how far the resilvering process of the new disk has progressed.

With `systemd-boot`:

.. code-block:: console

  # pve-efiboot-tool format <new disk's ESP>
  # pve-efiboot-tool init <new disk's ESP>

.. NOTE:: `ESP` stands for EFI System Partition, which is set up as partition #2 on
  bootable disks set up by the {pve} installer since version 5.4. For details, see
  xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].

With `grub`:

Usually `grub.cfg` is located in `/boot/grub/grub.cfg`.

.. code-block:: console

  # grub-install <new disk>
  # grub-mkconfig -o /path/to/grub.cfg

Activate E-Mail Notification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

ZFS comes with an event daemon, which monitors events generated by the
ZFS kernel module. The daemon can also send emails on ZFS events like
pool errors. Newer ZFS packages ship the daemon in a separate package,
and you can install it using `apt-get`:

.. code-block:: console

  # apt-get install zfs-zed

To activate the daemon, it is necessary to edit `/etc/zfs/zed.d/zed.rc` with your
favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:

.. code-block:: console

  ZED_EMAIL_ADDR="root"

Please note that Proxmox Backup forwards mails sent to `root` to the email
address configured for the root user.

IMPORTANT: The only setting that is required is `ZED_EMAIL_ADDR`. All
other settings are optional.
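
For the changed setting to take effect, the event daemon has to be restarted.
Assuming it runs as the usual `zfs-zed` systemd unit:

.. code-block:: console

  # systemctl restart zfs-zed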

Limit ZFS Memory Usage
^^^^^^^^^^^^^^^^^^^^^^

It is good to use at most 50 percent (which is the default) of the
system memory for the ZFS ARC, to prevent performance degradation of the
host. Use your preferred editor to change the configuration in
`/etc/modprobe.d/zfs.conf` and insert:

.. code-block:: console

  options zfs zfs_arc_max=8589934592

This example setting limits the usage to 8GB.
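
The value is given in bytes: 8GB is 8 * 1024 * 1024 * 1024 = 8589934592. You
can compute such values directly in a shell:

.. code-block:: console

  # echo $((8 * 1024 * 1024 * 1024))
  8589934592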

.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs every time this value changes:

.. code-block:: console

  # update-initramfs -u


SWAP on ZFS
^^^^^^^^^^^

Swap-space created on a zvol may cause some trouble, such as blocking the
server or generating a high I/O load, often seen when starting a backup
to an external storage.

We strongly recommend using enough memory, so that you normally do not
run into low memory situations. Should you need or want to add swap, it is
preferred to create a partition on a physical disk and use it as a swap device.
You can leave some space free for this purpose in the advanced options of the
installer. Additionally, you can lower the `swappiness` value.
A good value for servers is 10:

.. code-block:: console

  # sysctl -w vm.swappiness=10

To make the swappiness persistent, open `/etc/sysctl.conf` with
an editor of your choice and add the following line:

.. code-block:: console

  vm.swappiness = 10

.. table:: Linux kernel `swappiness` parameter values
  :widths: auto

  ==================== ===============================================================
  Value                Strategy
  ==================== ===============================================================
  vm.swappiness = 0    The kernel will swap only to avoid an 'out of memory' condition
  vm.swappiness = 1    Minimum amount of swapping without disabling it entirely.
  vm.swappiness = 10   Sometimes recommended to improve performance when sufficient memory exists in a system.
  vm.swappiness = 60   The default value.
  vm.swappiness = 100  The kernel will swap aggressively.
  ==================== ===============================================================
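
You can verify the currently active value at any time with:

.. code-block:: console

  # sysctl vm.swappiness
  vm.swappiness = 10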

ZFS Compression
^^^^^^^^^^^^^^^

To activate compression:

.. code-block:: console

  # zpool set compression=lz4 <pool>

We recommend using the `lz4` algorithm, since it adds very little CPU overhead.
Other algorithms such as `lzjb` and `gzip-N` (where `N` is an integer from `1` to `9`
representing the compression level, where 1 is fastest and 9 compresses best) are
also available.
Depending on the algorithm and how compressible the data is, having compression
enabled can even increase I/O performance.

You can disable compression at any time with:

.. code-block:: console

  # zfs set compression=off <dataset>

Only new blocks will be affected by this change.
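
To see how effective compression is on existing data, you can query the
`compressratio` property:

.. code-block:: console

  # zfs get compressratio <pool>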

ZFS Special Device
^^^^^^^^^^^^^^^^^^

Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
pool is used to store metadata, deduplication tables, and optionally small
file blocks.

A `special` device can improve the speed of a pool consisting of slow spinning
hard disks with a lot of metadata changes. For example, workloads that involve
creating, updating or deleting a large number of files will benefit from the
presence of a `special` device. ZFS datasets can also be configured to store
whole small files on the `special` device, which can further improve the
performance. Use fast SSDs for the `special` device.

.. IMPORTANT:: The redundancy of the `special` device should match the one of the
  pool, since the `special` device is a point of failure for the whole pool.

.. WARNING:: Adding a `special` device to a pool cannot be undone!

Create a pool with `special` device and RAID-1:

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> special mirror <device3> <device4>

Adding a `special` device to an existing pool with RAID-1:

.. code-block:: console

  # zpool add <pool> special mirror <device1> <device2>

ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
`0` to disable storing small file blocks on the `special` device, or a power of
two in the range from `512B` to `128K`. After setting the property, new file
blocks smaller than `size` will be allocated on the `special` device.

.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal to
  the `recordsize` (default `128K`) of the dataset, *all* data will be written to
  the `special` device, so be careful!

Setting the `special_small_blocks` property on a pool will change the default
value of that property for all child ZFS datasets (for example, all containers
in the pool will opt in for small file blocks).

Opt in for all files smaller than 4K, pool-wide:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>

Opt in for small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>/<filesystem>

Opt out from small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=0 <pool>/<filesystem>
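
You can check the effective value of the property on a dataset with:

.. code-block:: console

  # zfs get special_small_blocks <pool>/<filesystem>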

Troubleshooting
^^^^^^^^^^^^^^^

Corrupted cachefile

In case of a corrupted ZFS cachefile, some volumes may not be mounted during
boot until mounted manually later.

For each pool, run:

.. code-block:: console

  # zpool set cachefile=/etc/zfs/zpool.cache POOLNAME

and afterwards update the `initramfs` by running:

.. code-block:: console

  # update-initramfs -u -k all

and finally reboot your node.

Sometimes the ZFS cachefile can get corrupted, and `zfs-import-cache.service`
doesn't import the pools that aren't present in the cachefile.

Another workaround to this problem is enabling the `zfs-import-scan.service`,
which searches and imports pools via device scanning (usually slower).
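
The service can be enabled with:

.. code-block:: console

  # systemctl enable zfs-import-scan.service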

@@ -3,100 +3,149 @@
 Debian Package Repositories
 ---------------------------
 
-All Debian based systems use APT_ as package
-management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found in the
-``/etc/apt/sources.d/`` directory. Updates can be installed directly with
-the ``apt`` command line tool, or via the GUI.
+All Debian based systems use APT_ as a package management tool. The lists of
+repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
+in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
+with the ``apt`` command line tool, or via the GUI.
 
-APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored and a
-``#`` character anywhere on a line marks the remainder of that line as a
-comment. The information available from the configured sources is
-acquired by ``apt update``.
+APT_ ``sources.list`` files list one package repository per line, with the most
+preferred source listed first. Empty lines are ignored and a ``#`` character
+anywhere on a line marks the remainder of that line as a comment. The
+information available from the configured sources is acquired by ``apt
+update``.
 
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``
 
   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib
 
   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib
 
 
 .. FIXME for 7.0: change security update suite to bullseye-security
 
-In addition, Proxmox provides three different package repositories for
-the backup server binaries.
+In addition, you need a package repository from Proxmox to get Proxmox Backup updates.
 
-`Proxmox Backup`_ Enterprise Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+During the Proxmox Backup beta phase, only one repository (pbstest) will be
+available. Once released, an Enterprise repository for production use and a
+no-subscription repository will be provided.
 
-This is the default, stable, and recommended repository. It is available for
-all `Proxmox Backup`_ subscription users. It contains the most stable packages,
-and is suitable for production use. The ``pbs-enterprise`` repository is
-enabled by default:
+SecureApt
+~~~~~~~~~
 
-.. code-block:: sources.list
-  :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+The `Release` files in the repositories are signed with GnuPG. APT is using
+these signatures to verify that all packages are from a trusted source.
 
-  deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
+If you install Proxmox Backup Server from an official ISO image, the
+verification key is already installed.
+
+If you install Proxmox Backup Server on top of Debian, download and install the
+key with the following commands:
+
+.. code-block:: console
+
+  # wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Verify the SHA512 checksum afterwards with:
+
+.. code-block:: console
+
+  # sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+The output should be:
+
+.. code-block:: console
+
+  acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+and the md5sum:
+
+.. code-block:: console
+
+  # md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Here, the output should be:
+
+.. code-block:: console
+
+  f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+.. comment
+  `Proxmox Backup`_ Enterprise Repository
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  This will be the default, stable, and recommended repository. It is available for
+  all `Proxmox Backup`_ subscription users. It contains the most stable packages,
+  and is suitable for production use. The ``pbs-enterprise`` repository is
+  enabled by default:
+
+  .. note:: During the Proxmox Backup beta phase only one repository (pbstest)
+     will be available.
+
+  .. code-block:: sources.list
+    :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+
+    deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
 
 To never miss important security fixes, the superuser (``root@pam`` user) is
 notified via email about new packages as soon as they are available. The
 change-log and details of each package can be viewed in the GUI (if available).
 
 Please note that you need a valid subscription key to access this
 repository. More information regarding subscription levels and pricing can be
 found at https://www.proxmox.com/en/proxmox-backup/pricing.
 
 .. note:: You can disable this repository by commenting out the above
   line using a `#` (at the start of the line). This prevents error
   messages if you do not have a subscription key. Please configure the
   ``pbs-no-subscription`` repository in that case.
 
 
 `Proxmox Backup`_ No-Subscription Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 As the name suggests, you do not need a subscription key to access
 this repository. It can be used for testing and non-production
 use. It is not recommended to use it on production servers, because these
 packages are not always heavily tested and validated.
 
 We recommend to configure this repository in ``/etc/apt/sources.list``.
 
 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``
 
   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib
 
   # PBS pbs-no-subscription repository provided by proxmox.com,
   # NOT recommended for production use
-  deb http://download.proxmox.com/debian/bps buster pbs-no-subscription
+  deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
 
   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib
 
 
-`Proxmox Backup`_ Test Repository
+`Proxmox Backup`_ Beta Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Finally, there is a repository called ``pbstest``. This one contains the
-latest packages and is heavily used by developers to test new
+During the public beta, there is a repository called ``pbstest``. This one
+contains the latest packages and is heavily used by developers to test new
 features.
 
-.. warning:: the ``pbstest`` repository should (as the name implies)
+.. .. warning:: the ``pbstest`` repository should (as the name implies)
   only be used to test new features or bug fixes.
 
-You can configure this using ``/etc/apt/sources.list`` by
-adding the following line:
+You can access this repository by adding the following line to
+``/etc/apt/sources.list``:
 
 .. code-block:: sources.list
   :caption: sources.list entry for ``pbstest``
 
-  deb http://download.proxmox.com/debian/bps buster pbstest
+  deb http://download.proxmox.com/debian/pbs buster pbstest
+
+If you installed Proxmox Backup Server from the official beta ISO, you should
+have this repository already configured in
+``/etc/apt/sources.list.d/pbstest-beta.list``

@@ -24,7 +24,7 @@ This daemon is normally started and managed as ``systemd`` service::
 
   systemctl status proxmox-backup-proxy
 
-For debugging, you can start the daemon in forground using::
+For debugging, you can start the daemon in foreground using::
 
   proxmox-backup-proxy
 
@@ -9,7 +9,7 @@ which caters to a similar use-case.
 The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
 Backup Server, for example, efficient storage of hardlinks.
 The format is designed to reduce storage space needed on the server by achieving
-a high level of de-duplication.
+a high level of deduplication.
 
 Creating an Archive
 ^^^^^^^^^^^^^^^^^^^
@@ -18,7 +18,7 @@ Run the following command to create an archive of a folder named ``source``:
 
 .. code-block:: console
 
-  # pxar create archive.pxar source
+  # pxar create archive.pxar /path/to/source
 
 This will create a new archive called ``archive.pxar`` with the contents of the
 ``source`` folder.
@@ -29,45 +29,44 @@ This will create a new archive called ``archive.pxar`` with the contents of the
 
 By default, ``pxar`` will skip certain mountpoints and will not follow device
 boundaries. This design decision is based on the primary use case of creating
-archives for backups. It is sensible to not back up the contents of certain
+archives for backups. It makes sense to not back up the contents of certain
 temporary or system specific files.
 To alter this behavior and follow device boundaries, use the
 ``--all-file-systems`` flag.
 
 It is possible to exclude certain files and/or folders from the archive by
-passing glob match patterns as additional parameters. Whenever a file is matched
-by one of the patterns, you will get a warning stating that this file is skipped
-and therefore not included in the archive.
+passing the ``--exclude`` parameter with ``gitignore``\-style match patterns.
 
 For example, you can exclude all files ending in ``.txt`` from the archive
 by running:
 
 .. code-block:: console
 
-  # pxar create archive.pxar source '**/*.txt'
+  # pxar create archive.pxar /path/to/source --exclude '**/*.txt'
 
 Be aware that the shell itself will try to expand all of the glob patterns before
 invoking ``pxar``.
 In order to avoid this, all globs have to be quoted correctly.
 
-It is possible to pass a list of match patterns to fulfill more complex
-file exclusion/inclusion behavior, although it is recommended to use the
+It is possible to pass the ``--exclude`` parameter multiple times, in order to
+match more than one pattern. This allows you to use more complex
+file exclusion/inclusion behavior. However, it is recommended to use
 ``.pxarexclude`` files instead for such cases.
 
 For example you might want to exclude all ``.txt`` files except for a specific
 one from the archive. This is achieved via the negated match pattern, prefixed
 by ``!``.
-All the glob pattern are relative to the ``source`` directory.
+All the glob patterns are relative to the ``source`` directory.
 
 .. code-block:: console
 
-  # pxar create archive.pxar source '**/*.txt' '!/folder/file.txt'
+  # pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'
 
-.. NOTE:: The order of the glob match patterns matters as later ones win over
+.. NOTE:: The order of the glob match patterns matters as later ones override
   previous ones. Permutations of the same patterns lead to different results.
 
 ``pxar`` will store the list of glob match patterns passed as parameters via the
-command line in a file called ``.pxarexclude-cli`` and stores it at the root of
+command line, in a file called ``.pxarexclude-cli`` at the root of
 the archive.
 If a file with this name is already present in the source folder during archive
 creation, this file is not included in the archive and the file containing the
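
For illustration, a ``.pxarexclude`` file placed in the source directory could
look like the following sketch (the file names are made-up examples; the
patterns follow the same glob syntax described above):

.. code-block:: console

  # cat /path/to/source/.pxarexclude
  **/*.tmp
  /cache/
  !/cache/important.db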

@@ -86,23 +85,23 @@ The behavior is the same as described in :ref:`creating-backups`.
 Extracting an Archive
 ^^^^^^^^^^^^^^^^^^^^^
 
-An existing archive ``archive.pxar`` is extracted to a ``target`` directory
+An existing archive, ``archive.pxar``, is extracted to a ``target`` directory
 with the following command:
 
 .. code-block:: console
 
-  # pxar extract archive.pxar --target target
+  # pxar extract archive.pxar /path/to/target
 
 If no target is provided, the content of the archive is extracted to the current
 working directory.
 
-In order to restore only parts of an archive, single files and/or folders,
+In order to restore only parts of an archive, single files, and/or folders,
 it is possible to pass the corresponding glob match patterns as additional
-parameters or use the patterns stored in a file:
+parameters or to use the patterns stored in a file:
 
 .. code-block:: console
 
-  # pxar extract etc.pxar '**/*.conf' --target /restore/target/etc
+  # pxar extract etc.pxar /restore/target/etc --pattern '**/*.conf'
 
 The above example restores all ``.conf`` files encountered in any of the
 sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
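
To inspect the contents of an existing archive without extracting it, the
``pxar`` tool also provides a ``list`` subcommand:

.. code-block:: console

  # pxar list archive.pxar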

@@ -1,5 +1,5 @@
 Host System Administration
---------------------------
+==========================
 
 `Proxmox Backup`_ is based on the famous Debian_ Linux
 distribution. That means that you have access to the whole world of
@@ -23,8 +23,4 @@ either explain things which are different on `Proxmox Backup`_, or
 tasks which are commonly used on `Proxmox Backup`_. For other topics,
 please refer to the standard Debian documentation.
 
-ZFS
-~~~
-
-.. todo:: Add local ZFS admin guide (local.zfs.adoc)
+.. include:: local-zfs.rst

docs/todos.rst (new file, 6 lines)
@@ -0,0 +1,6 @@

Documentation Todo List
=======================

This is an auto-generated list of the todo references in the documentation.

.. todolist::

@@ -7,7 +7,7 @@ DYNAMIC_UNITS := \
 	proxmox-backup.service \
 	proxmox-backup-proxy.service
 
-all: $(UNITS) $(DYNAMIC_UNITS)
+all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
 
 clean:
 	rm -f $(DYNAMIC_UNITS)

etc/pbstest-beta.list (new file, 1 line)
@@ -0,0 +1 @@

deb http://download.proxmox.com/debian/pbs buster pbstest

@@ -4,6 +4,7 @@ use anyhow::{Error};
 
 use chrono::{DateTime, Utc};
 
+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
 
 pub struct DummyWriter {
@@ -27,7 +28,7 @@ async fn run() -> Result<(), Error> {
 
     let host = "localhost";
 
-    let username = "root@pam";
+    let username = Userid::root_userid();
 
     let options = HttpClientOptions::new()
         .interactive(true)

@@ -1,13 +1,14 @@
 use anyhow::{Error};
 
+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::*;
 
-async fn upload_speed() -> Result<usize, Error> {
+async fn upload_speed() -> Result<f64, Error> {
 
     let host = "localhost";
     let datastore = "store2";
 
-    let username = "root@pam";
+    let username = Userid::root_userid();
 
     let options = HttpClientOptions::new()
         .interactive(true)
@@ -20,7 +21,7 @@ async fn upload_speed() -> Result<usize, Error> {
     let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
 
     println!("start upload speed test");
-    let res = client.upload_speedtest().await?;
+    let res = client.upload_speedtest(true).await?;
 
     Ok(res)
 }

@@ -4,7 +4,6 @@ pub mod backup;
 pub mod config;
 pub mod node;
 pub mod reader;
-mod subscription;
 pub mod status;
 pub mod types;
 pub mod version;
@@ -26,7 +25,6 @@ pub const SUBDIRS: SubdirMap = &[
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
-    ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
 
@@ -2,56 +2,110 @@ use anyhow::{bail, format_err, Error};
 
 use serde_json::{json, Value};
 
-use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
+use proxmox::api::{api, RpcEnvironment, Permission};
 use proxmox::api::router::{Router, SubdirMap};
 use proxmox::{sortable, identity};
 use proxmox::{http_err, list_subdirs_api_method};
 
-use crate::tools;
-use crate::tools::ticket::*;
+use crate::tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::api2::types::*;
 
 use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
 
 pub mod user;
 pub mod domain;
 pub mod acl;
 pub mod role;
 
-fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
+/// returns Ok(true) if a ticket has to be created
+/// and Ok(false) if not
+fn authenticate_user(
+    userid: &Userid,
+    password: &str,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;
 
-    if !user_info.is_active_user(&username) {
+    if !user_info.is_active_user(&userid) {
         bail!("user account disabled or expired.");
     }
 
-    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
-
     if password.starts_with("PBS:") {
-        if let Ok((_age, Some(ticket_username))) = tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", password, None, -300, ticket_lifetime) {
-            if ticket_username == username {
-                return Ok(());
-            } else {
-                bail!("ticket login failed - wrong username");
-            }
+        if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
+            .and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
+        {
+            if *userid == ticket_userid {
+                return Ok(true);
+            }
+            bail!("ticket login failed - wrong userid");
+        }
+    } else if password.starts_with("PBSTERM:") {
+        if path.is_none() || privs.is_none() || port.is_none() {
+            bail!("cannot check termnal ticket without path, priv and port");
+        }
+
+        let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
+        let privilege_name = privs
+            .ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
+        let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
+
+        if let Ok(Empty) = Ticket::parse(password)
+            .and_then(|ticket| ticket.verify(
+                public_auth_key(),
+                ticket::TERM_PREFIX,
+                Some(&ticket::term_aad(userid, &path, port)),
+            ))
+        {
+            for (name, privilege) in PRIVILEGES {
+                if *name == privilege_name {
+                    let mut path_vec = Vec::new();
+                    for part in path.split('/') {
+                        if part != "" {
+                            path_vec.push(part);
+                        }
+                    }
+
+                    user_info.check_privs(userid, &path_vec, *privilege, false)?;
+                    return Ok(false);
+                }
+            }
+
+            bail!("No such privilege");
         }
     }
 
-    crate::auth::authenticate_user(username, password)
+    let _ = crate::auth::authenticate_user(userid, password)?;
+    Ok(true)
 }
 
 #[api(
     input: {
         properties: {
             username: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 schema: PASSWORD_SCHEMA,
             },
+            path: {
+                type: String,
+                description: "Path for verifying terminal tickets.",
+                optional: true,
+            },
+            privs: {
+                type: String,
+                description: "Privilege for verifying terminal tickets.",
+                optional: true,
+            },
+            port: {
+                type: Integer,
+                description: "Port for verifying terminal tickets.",
+                optional: true,
+            },
         },
     },
     returns: {
@@ -78,11 +132,16 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
 /// Create or verify authentication ticket.
 ///
 /// Returns: An authentication ticket with additional infos.
-fn create_ticket(username: String, password: String) -> Result<Value, Error> {
-    match authenticate_user(&username, &password) {
-        Ok(_) => {
-            let ticket = assemble_rsa_ticket( private_auth_key(), "PBS", Some(&username), None)?;
+fn create_ticket(
+    username: Userid,
+    password: String,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<Value, Error> {
+    match authenticate_user(&username, &password, path, privs, port) {
+        Ok(true) => {
+            let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
 
             let token = assemble_csrf_prevention_token(csrf_secret(), &username);
 
@@ -94,10 +153,13 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
                 "CSRFPreventionToken": token,
             }))
         }
+        Ok(false) => Ok(json!({
+            "username": username,
+        })),
         Err(err) => {
             let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
             log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
-            Err(http_err!(UNAUTHORIZED, "permission check failed.".into()))
+            Err(http_err!(UNAUTHORIZED, "permission check failed."))
         }
     }
 }
@@ -106,7 +168,7 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 schema: PASSWORD_SCHEMA,
@@ -124,13 +186,15 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
 /// Each user is allowed to change his own password. Superuser
 /// can change all passwords.
 fn change_password(
-    userid: String,
+    userid: Userid,
     password: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
-    let current_user = rpcenv.get_user()
-        .ok_or_else(|| format_err!("unknown user"))?;
+    let current_user: Userid = rpcenv
+        .get_user()
+        .ok_or_else(|| format_err!("unknown user"))?
+        .parse()?;
 
     let mut allowed = userid == current_user;
 
@@ -146,9 +210,8 @@ fn change_password(
         bail!("you are not authorized to change the password.");
     }
 
-    let (username, realm) = crate::auth::parse_userid(&userid)?;
-    let authenticator = crate::auth::lookup_authenticator(&realm)?;
-    authenticator.store_password(&username, &password)?;
+    let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
+    authenticator.store_password(userid.name(), &password)?;
 
     Ok(Value::Null)
 }

@@ -2,6 +2,7 @@ use anyhow::{bail, Error};
 use ::serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;
 
 use crate::api2::types::*;
 use crate::config::acl;
@@ -141,7 +142,7 @@ pub fn read_acl(
         },
         userid: {
             optional: true,
-            schema: PROXMOX_USER_ID_SCHEMA,
+            type: Userid,
         },
         group: {
             optional: true,
@@ -167,14 +168,14 @@ pub fn update_acl(
     path: String,
     role: String,
     propagate: Option<bool>,
-    userid: Option<String>,
+    userid: Option<Userid>,
     group: Option<String>,
     delete: Option<bool>,
     digest: Option<String>,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut tree, expected_digest) = acl::config()?;
 
@@ -192,7 +193,7 @@ pub fn update_acl(
     } else if let Some(ref userid) = userid {
         if !delete { // Note: we allow to delete non-existent users
             let user_cfg = crate::config::user::cached_config()?;
-            if user_cfg.sections.get(userid).is_none() {
+            if user_cfg.sections.get(&userid.to_string()).is_none() {
                 bail!("no such user.");
             }
         }
@ -3,6 +3,7 @@ use serde_json::Value;
|
|||||||
|
|
||||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
||||||
use proxmox::api::schema::{Schema, StringSchema};
|
use proxmox::api::schema::{Schema, StringSchema};
|
||||||
|
use proxmox::tools::fs::open_file_locked;
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::config::user;
|
use crate::config::user;
|
||||||
@ -48,7 +49,7 @@ pub fn list_users(
|
|||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
userid: {
|
userid: {
|
||||||
schema: PROXMOX_USER_ID_SCHEMA,
|
type: Userid,
|
||||||
},
|
},
|
||||||
comment: {
|
comment: {
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
@ -87,25 +88,24 @@ pub fn list_users(
|
|||||||
/// Create new user.
|
/// Create new user.
|
||||||
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
|
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
|
||||||
|
|
||||||
let user: user::User = serde_json::from_value(param)?;
|
let user: user::User = serde_json::from_value(param)?;
|
||||||
|
|
||||||
let (mut config, _digest) = user::config()?;
|
let (mut config, _digest) = user::config()?;
|
||||||
|
|
||||||
if let Some(_) = config.sections.get(&user.userid) {
|
if let Some(_) = config.sections.get(user.userid.as_str()) {
|
||||||
bail!("user '{}' already exists.", user.userid);
|
bail!("user '{}' already exists.", user.userid);
|
||||||
}
|
}
|
||||||
|
|
||||||
let (username, realm) = crate::auth::parse_userid(&user.userid)?;
|
let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;
|
||||||
let authenticator = crate::auth::lookup_authenticator(&realm)?;
|
|
||||||
|
|
||||||
config.set_data(&user.userid, "user", &user)?;
|
config.set_data(user.userid.as_str(), "user", &user)?;
|
||||||
|
|
||||||
user::save_config(&config)?;
|
user::save_config(&config)?;
|
||||||
|
|
||||||
if let Some(password) = password {
|
if let Some(password) = password {
|
||||||
authenticator.store_password(&username, &password)?;
|
authenticator.store_password(user.userid.name(), &password)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -115,7 +115,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
|
|||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
userid: {
|
userid: {
|
||||||
schema: PROXMOX_USER_ID_SCHEMA,
|
type: Userid,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -128,9 +128,9 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
|
|||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Read user configuration data.
|
/// Read user configuration data.
|
||||||
pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
|
pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
|
||||||
let (config, digest) = user::config()?;
|
let (config, digest) = user::config()?;
|
||||||
let user = config.lookup("user", &userid)?;
|
let user = config.lookup("user", userid.as_str())?;
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
Ok(user)
|
Ok(user)
|
||||||
}
|
}
|
||||||
@ -140,7 +140,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
|
|||||||
input: {
|
input: {
|
||||||
     properties: {
         userid: {
-            schema: PROXMOX_USER_ID_SCHEMA,
+            type: Userid,
         },
         comment: {
             optional: true,
@@ -182,7 +182,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
 )]
 /// Update user configuration.
 pub fn update_user(
-    userid: String,
+    userid: Userid,
     comment: Option<String>,
     enable: Option<bool>,
     expire: Option<i64>,
@@ -193,7 +193,7 @@ pub fn update_user(
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = user::config()?;

@@ -202,7 +202,7 @@ pub fn update_user(
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }

-    let mut data: user::User = config.lookup("user", &userid)?;
+    let mut data: user::User = config.lookup("user", userid.as_str())?;

     if let Some(comment) = comment {
         let comment = comment.trim().to_string();
@@ -222,9 +222,8 @@ pub fn update_user(
     }

     if let Some(password) = password {
-        let (username, realm) = crate::auth::parse_userid(&userid)?;
-        let authenticator = crate::auth::lookup_authenticator(&realm)?;
-        authenticator.store_password(&username, &password)?;
+        let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
+        authenticator.store_password(userid.name(), &password)?;
     }

     if let Some(firstname) = firstname {
@@ -238,7 +237,7 @@ pub fn update_user(
         data.email = if email.is_empty() { None } else { Some(email) };
     }

-    config.set_data(&userid, "user", &data)?;
+    config.set_data(userid.as_str(), "user", &data)?;

     user::save_config(&config)?;

@@ -250,7 +249,7 @@ pub fn update_user(
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             digest: {
                 optional: true,
@@ -263,9 +262,9 @@ pub fn update_user(
     },
 )]
 /// Remove a user from the configuration file.
-pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {
+pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = user::config()?;

@@ -274,8 +273,8 @@ pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error>
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }

-    match config.sections.get(&userid) {
-        Some(_) => { config.sections.remove(&userid); },
+    match config.sections.get(userid.as_str()) {
+        Some(_) => { config.sections.remove(userid.as_str()); },
         None => bail!("user '{}' does not exist.", userid),
     }

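The change running through this file replaces plain String userids with a dedicated Userid type, so the "user@realm" syntax is validated once when the request is parsed and the handlers get infallible name(), realm() and as_str() accessors instead of re-splitting strings everywhere. The real type lives in the api2 types module; the stand-alone sketch below only illustrates the newtype idea and is not the project's implementation:

    // Illustrative newtype over a validated "user@realm" string; the
    // accessors can slice without re-checking because FromStr did the check.
    struct Userid {
        data: String,
        name_len: usize, // byte offset of the '@' separator
    }

    impl std::str::FromStr for Userid {
        type Err = String;
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            match s.find('@') {
                Some(pos) if pos > 0 && pos + 1 < s.len() => Ok(Userid {
                    data: s.to_string(),
                    name_len: pos,
                }),
                _ => Err(format!("invalid userid '{}'", s)),
            }
        }
    }

    impl Userid {
        fn name(&self) -> &str { &self.data[..self.name_len] }
        fn realm(&self) -> &str { &self.data[self.name_len + 1..] }
        fn as_str(&self) -> &str { &self.data }
    }

    fn main() {
        let userid: Userid = "backup@pam".parse().unwrap();
        assert_eq!(userid.name(), "backup");
        assert_eq!(userid.realm(), "pam");
        assert_eq!(userid.as_str(), "backup@pam");
    }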
@@ -10,7 +10,8 @@ use serde_json::{json, Value};

 use proxmox::api::{
     api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
-    RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
+    RpcEnvironment, RpcEnvironmentType, Permission
+};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 use proxmox::tools::fs::{replace_file, CreateOptions};
@@ -36,7 +37,11 @@ use crate::config::acl::{
     PRIV_DATASTORE_BACKUP,
 };

-fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
+fn check_backup_owner(
+    store: &DataStore,
+    group: &BackupGroup,
+    userid: &Userid,
+) -> Result<(), Error> {
     let owner = store.get_owner(group)?;
     if &owner != userid {
         bail!("backup owner check failed ({} != {})", userid, owner);
@@ -44,7 +49,10 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
     Ok(())
 }

-fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
+fn read_backup_index(
+    store: &DataStore,
+    backup_dir: &BackupDir,
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

     let (manifest, index_size) = store.load_manifest(backup_dir)?;

@@ -52,25 +60,29 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
-            encrypted: item.encrypted,
+            crypt_mode: Some(item.crypt_mode),
             size: Some(item.size),
         });
     }

     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
-        encrypted: Some(false),
+        crypt_mode: match manifest.signature {
+            Some(_) => Some(CryptMode::SignOnly),
+            None => Some(CryptMode::None),
+        },
         size: Some(index_size),
     });

-    Ok(result)
+    Ok((manifest, result))
 }

 fn get_all_snapshot_files(
     store: &DataStore,
     info: &BackupInfo,
-) -> Result<Vec<BackupContent>, Error> {
-    let mut files = read_backup_index(&store, &info.backup_dir)?;
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
+
+    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -79,10 +91,14 @@ fn get_all_snapshot_files(

     for file in &info.files {
         if file_set.contains(file) { continue; }
-        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+        files.push(BackupContent {
+            filename: file.to_string(),
+            size: None,
+            crypt_mode: None,
+        });
     }

-    Ok(files)
+    Ok((manifest, files))
 }

 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
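The boolean encrypted flag on BackupContent is too coarse once manifests can be signed without being encrypted, which is why the listing above switches to a three-state crypt_mode (None, SignOnly, Encrypt). A hedged sketch of such an enum; the real definition and its serde attributes live elsewhere in the tree and may differ:

    use serde::{Deserialize, Serialize};

    // Three-state replacement for a boolean "encrypted" flag: a signed
    // but unencrypted manifest is representable as SignOnly.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
    #[serde(rename_all = "kebab-case")]
    enum CryptMode {
        None,
        Encrypt,
        SignOnly,
    }

    fn main() {
        // matches the mapping used for MANIFEST_BLOB_NAME above
        let signature: Option<&str> = Some("...");
        let mode = match signature {
            Some(_) => CryptMode::SignOnly,
            None => CryptMode::None,
        };
        assert_eq!(mode, CryptMode::SignOnly);
    }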
@@ -126,9 +142,9 @@ fn list_groups(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -149,7 +165,7 @@ fn list_groups(
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
         let owner = datastore.get_owner(group)?;
         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

         let result_item = GroupListItem {
@@ -207,20 +223,22 @@ pub fn list_snapshot_files(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

     let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

-    get_all_snapshot_files(&datastore, &info)
+    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
+
+    Ok(files)
 }

 #[api(
@@ -257,18 +275,18 @@ fn delete_snapshot(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

-    datastore.remove_backup_dir(&snapshot)?;
+    datastore.remove_backup_dir(&snapshot, false)?;

     Ok(Value::Null)
 }
@@ -313,9 +331,9 @@ pub fn list_snapshots (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SnapshotListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -338,19 +356,46 @@ pub fn list_snapshots (
         let owner = datastore.get_owner(group)?;

         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

         let mut size = None;

-        let files = match get_all_snapshot_files(&datastore, &info) {
-            Ok(files) => {
+        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
+            Ok((manifest, files)) => {
                 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
-                files
+                // extract the first line from notes
+                let comment: Option<String> = manifest.unprotected["notes"]
+                    .as_str()
+                    .and_then(|notes| notes.lines().next())
+                    .map(String::from);
+
+                let verify = manifest.unprotected["verify_state"].clone();
+                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
+                    Ok(verify) => verify,
+                    Err(err) => {
+                        eprintln!("error parsing verification state : '{}'", err);
+                        None
+                    }
+                };
+
+                (comment, verify, files)
             },
             Err(err) => {
                 eprintln!("error during snapshot file listing: '{}'", err);
-                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+                (
+                    None,
+                    None,
+                    info
+                        .files
+                        .iter()
+                        .map(|x| BackupContent {
+                            filename: x.to_string(),
+                            size: None,
+                            crypt_mode: None,
+                        })
+                        .collect()
+                )
             },
         };

@@ -358,6 +403,8 @@ pub fn list_snapshots (
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
+            comment,
+            verification,
             files,
             size,
             owner: Some(owner),
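The snapshot list now surfaces a per-snapshot comment without shipping the whole notes text: only the first line of the manifest's notes field is extracted. A self-contained sketch of that extraction:

    // First line of a multi-line notes field, None when the notes are empty.
    fn comment_from_notes(notes: &str) -> Option<String> {
        notes.lines().next().map(String::from)
    }

    fn main() {
        assert_eq!(
            comment_from_notes("nightly vm100 backup\nlong description..."),
            Some("nightly vm100 backup".to_string())
        );
        assert_eq!(comment_from_notes(""), None);
    }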
@@ -453,27 +500,41 @@ pub fn verify(
         (None, None, None) => {
             worker_id = store.clone();
         }
-        _ => bail!("parameters do not spefify a backup group or snapshot"),
+        _ => bail!("parameters do not specify a backup group or snapshot"),
     }

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
-        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
-        {
-            let success = if let Some(backup_dir) = backup_dir {
-                verify_backup_dir(&datastore, &backup_dir, &worker)?
+        "verify",
+        Some(worker_id.clone()),
+        userid,
+        to_stdout,
+        move |worker| {
+            let failed_dirs = if let Some(backup_dir) = backup_dir {
+                let mut verified_chunks = HashSet::with_capacity(1024*16);
+                let mut corrupt_chunks = HashSet::with_capacity(64);
+                let mut res = Vec::new();
+                if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
+                    res.push(backup_dir.to_string());
+                }
+                res
             } else if let Some(backup_group) = backup_group {
                 verify_backup_group(&datastore, &backup_group, &worker)?
             } else {
                 verify_all_backups(&datastore, &worker)?
             };
-            if !success {
-                bail!("verfication failed - please check the log for details");
+            if failed_dirs.len() > 0 {
+                worker.log("Failed to verify following snapshots:");
+                for dir in failed_dirs {
+                    worker.log(format!("\t{}", dir));
+                }
+                bail!("verification failed - please check the log for details");
             }
             Ok(())
-        })?;
+        },
+    )?;

     Ok(json!(upid_str))
 }
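Two things change in the verify task above: failures are collected per snapshot instead of aborting on the first one, and chunk digests already checked (or already found corrupt) are cached in HashSets so chunks shared between snapshots are not re-read every time. A simplified sketch of that control flow, with verify_one standing in for the real per-snapshot check:

    use std::collections::HashSet;

    fn verify_all(snapshots: &[&str]) -> Result<(), String> {
        let mut verified_chunks: HashSet<[u8; 32]> = HashSet::with_capacity(1024 * 16);
        let mut corrupt_chunks: HashSet<[u8; 32]> = HashSet::with_capacity(64);
        let mut failed: Vec<String> = Vec::new();

        for snap in snapshots {
            // chunk caches are shared across all snapshots of the run
            if !verify_one(snap, &mut verified_chunks, &mut corrupt_chunks) {
                failed.push(snap.to_string());
            }
        }

        if !failed.is_empty() {
            return Err(format!("verification failed for: {}", failed.join(", ")));
        }
        Ok(())
    }

    fn verify_one(_snap: &str, _ok: &mut HashSet<[u8; 32]>, _bad: &mut HashSet<[u8; 32]>) -> bool {
        true // placeholder: the real check re-reads and checksums every chunk
    }

    fn main() {
        assert!(verify_all(&["vm/100/2020-07-30T10:00:00Z"]).is_ok());
    }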
@@ -523,7 +584,7 @@ macro_rules! add_common_prune_prameters {

 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     "Returns the list of snapshots and a flag indicating if there are kept or removed.",
-    PruneListItem::API_SCHEMA
+    &PruneListItem::API_SCHEMA
 ).schema();

 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
@@ -558,9 +619,9 @@ fn prune(
     let backup_type = tools::required_string_param(&param, "backup-type")?;
     let backup_id = tools::required_string_param(&param, "backup-id")?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let dry_run = param["dry-run"].as_bool().unwrap_or(false);

@@ -569,7 +630,7 @@ fn prune(
     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, &group, &username)?; }
+    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

     let prune_options = PruneOptions {
         keep_last: param["keep-last"].as_u64(),
@@ -611,7 +672,7 @@ fn prune(


     // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

     let result = try_block! {
         if keep_all {
@@ -648,7 +709,7 @@ fn prune(
             }));

             if !(dry_run || keep) {
-                datastore.remove_backup_dir(&info.backup_dir)?;
+                datastore.remove_backup_dir(&info.backup_dir, true)?;
             }
         }

@@ -693,11 +754,15 @@ fn start_garbage_collection(
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
-        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
-        {
+        "garbage_collection",
+        Some(store.clone()),
+        Userid::root_userid().clone(),
+        to_stdout,
+        move |worker| {
             worker.log(format!("starting garbage collection on store {}", store));
             datastore.garbage_collection(&worker)
-        })?;
+        },
+    )?;

     Ok(json!(upid_str))
 }
@@ -761,13 +826,13 @@ fn get_datastore_list(

     let (config, _digest) = datastore::config()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let mut list = Vec::new();

     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             let mut entry = json!({ "store": store });
@@ -812,9 +877,9 @@ fn download_file(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -825,7 +890,7 @@ fn download_file(
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

     println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

@@ -834,8 +899,8 @@ fn download_file(
     path.push(&file_name);

     let file = tokio::fs::File::open(&path)
-        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
-        .await?;
+        .await
+        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
         .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
@@ -885,9 +950,9 @@ fn download_file_decoded(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -898,11 +963,11 @@ fn download_file_decoded(
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

-    let files = read_backup_index(&datastore, &backup_dir)?;
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.encrypted == Some(true) {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", file_name);
         }
     }
@@ -919,8 +984,10 @@ fn download_file_decoded(
         "didx" => {
             let index = DynamicIndexReader::open(&path)
                 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+            let (csum, size) = index.compute_csum();
+            manifest.verify_file(&file_name, &csum, size)?;

-            let chunk_reader = LocalChunkReader::new(datastore, None);
+            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
             let reader = AsyncIndexReader::new(index, chunk_reader);
             Body::wrap_stream(AsyncReaderStream::new(reader)
                 .map_err(move |err| {
@@ -932,7 +999,10 @@ fn download_file_decoded(
             let index = FixedIndexReader::open(&path)
                 .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

-            let chunk_reader = LocalChunkReader::new(datastore, None);
+            let (csum, size) = index.compute_csum();
+            manifest.verify_file(&file_name, &csum, size)?;
+
+            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
             let reader = AsyncIndexReader::new(index, chunk_reader);
             Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                 .map_err(move |err| {
@@ -942,7 +1012,9 @@ fn download_file_decoded(
         },
         "blob" => {
             let file = std::fs::File::open(&path)
-                .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
+                .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
+
+            // FIXME: load full blob to verify index checksum?

             Body::wrap_stream(
                 WrappedReaderStream::new(DataBlobReader::new(file, None)?)
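Every decoded download above now computes a checksum over the index it is about to stream (compute_csum) and requires the manifest to agree (verify_file) before any chunk leaves the server, so a tampered or swapped index file fails fast. A reduced sketch of that gate with stand-in types, not the project's actual manifest structure:

    use std::collections::HashMap;

    struct Manifest {
        // filename -> (csum, size) as recorded at backup time
        files: HashMap<String, ([u8; 32], u64)>,
    }

    impl Manifest {
        fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), String> {
            match self.files.get(name) {
                Some((c, s)) if c == csum && *s == size => Ok(()),
                Some(_) => Err(format!("manifest mismatch for '{}'", name)),
                None => Err(format!("file '{}' not found in manifest", name)),
            }
        }
    }

    fn main() {
        let mut files = HashMap::new();
        files.insert("catalog.pcat1.didx".to_string(), ([0u8; 32], 1234u64));
        let manifest = Manifest { files };
        assert!(manifest.verify_file("catalog.pcat1.didx", &[0u8; 32], 1234).is_ok());
        assert!(manifest.verify_file("catalog.pcat1.didx", &[1u8; 32], 1234).is_err());
    }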
@@ -1003,8 +1075,8 @@ fn upload_backup_log(

     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

-    let username = rpcenv.get_user().unwrap();
-    check_backup_owner(&datastore, backup_dir.group(), &username)?;
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    check_backup_owner(&datastore, backup_dir.group(), &userid)?;

     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
@@ -1025,11 +1097,10 @@ fn upload_backup_log(
         })
         .await?;

-    let blob = DataBlob::from_raw(data)?;
-    // always verify CRC at server side
-    blob.verify_crc()?;
-    let raw_data = blob.raw_data();
-    replace_file(&path, raw_data, CreateOptions::new())?;
+    // always verify blob/CRC at server side
+    let blob = DataBlob::load_from_reader(&mut &data[..])?;
+    replace_file(&path, blob.raw_data(), CreateOptions::new())?;

     // fixme: use correct formatter
     Ok(crate::server::formatter::json_response(Ok(Value::Null)))
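DataBlob::from_raw accepted the bytes first and relied on a separate verify_crc() call; DataBlob::load_from_reader folds the CRC check into construction, so no code path ever holds an unverified blob. A toy sketch of that parse-and-validate constructor; the types and the checksum are stand-ins, not the real blob format:

    use std::convert::TryInto;

    struct Blob {
        raw: Vec<u8>,
    }

    impl Blob {
        // the only way to obtain a Blob is through a loader that has
        // already checked the trailing CRC
        fn load_from_reader<R: std::io::Read>(reader: &mut R) -> Result<Self, String> {
            let mut raw = Vec::new();
            reader.read_to_end(&mut raw).map_err(|e| e.to_string())?;
            if raw.len() < 4 {
                return Err("blob too short".into());
            }
            let (payload, crc_bytes) = raw.split_at(raw.len() - 4);
            let stored = u32::from_le_bytes(crc_bytes.try_into().unwrap());
            if crc32(payload) != stored {
                return Err("blob CRC check failed".into());
            }
            Ok(Blob { raw })
        }
    }

    fn crc32(data: &[u8]) -> u32 {
        // toy checksum for the sketch; the real code uses a proper CRC-32
        data.iter().fold(0u32, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u32))
    }

    fn main() {
        let mut raw = b"payload".to_vec();
        let crc = crc32(&raw);
        raw.extend_from_slice(&crc.to_le_bytes());
        assert!(Blob::load_from_reader(&mut &raw[..]).is_ok());

        raw[0] ^= 0xff; // corrupt one payload byte
        assert!(Blob::load_from_reader(&mut &raw[..]).is_err());
    }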
@@ -1074,23 +1145,35 @@ fn catalog(
 ) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let file_name = CATALOG_NAME;
+
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+    for file in files {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            bail!("cannot decode '{}' - is encrypted", file_name);
+        }
+    }

     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
-    path.push(CATALOG_NAME);
+    path.push(file_name);

     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

-    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let (csum, size) = index.compute_csum();
+    manifest.verify_file(&file_name, &csum, size)?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);

     let mut catalog_reader = CatalogReader::new(reader);
@@ -1146,7 +1229,7 @@ fn catalog(
 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&pxar_file_download),
     &ObjectSchema::new(
-        "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
+        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
@@ -1173,9 +1256,9 @@ fn pxar_file_download(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(&store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

@@ -1186,10 +1269,7 @@ fn pxar_file_download(
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

-    let mut path = datastore.base_path();
-    path.push(backup_dir.relative_path());
-
     let mut components = base64::decode(&filepath)?;
     if components.len() > 0 && components[0] == '/' as u8 {
@@ -1197,15 +1277,26 @@ fn pxar_file_download(
     }

     let mut split = components.splitn(2, |c| *c == '/' as u8);
-    let pxar_name = split.next().unwrap();
+    let pxar_name = std::str::from_utf8(split.next().unwrap())?;
     let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+    for file in files {
+        if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            bail!("cannot decode '{}' - is encrypted", pxar_name);
+        }
+    }

-    path.push(OsStr::from_bytes(&pxar_name));
+    let mut path = datastore.base_path();
+    path.push(backup_dir.relative_path());
+    path.push(pxar_name);

     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

-    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let (csum, size) = index.compute_csum();
+    manifest.verify_file(&pxar_name, &csum, size)?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
     let archive_size = reader.archive_size();
     let reader = LocalDynamicReadAt::new(reader);
@@ -1281,6 +1372,108 @@ fn get_rrd_stats(
     )
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get "notes" for a specific backup
+fn get_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    let notes = manifest["unprotected"]["notes"]
+        .as_str()
+        .unwrap_or("");
+
+    Ok(String::from(notes))
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            notes: {
+                description: "A multiline text.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+    },
+)]
+/// Set "notes" for a specific backup
+fn set_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    notes: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let mut manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    manifest["unprotected"]["notes"] = notes.into();
+
+    datastore.store_manifest(&backup_dir, manifest)?;
+
+    Ok(())
+}
+
 #[sortable]
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
     (
@@ -1314,6 +1507,12 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .get(&API_METHOD_LIST_GROUPS)
     ),
+    (
+        "notes",
+        &Router::new()
+            .get(&API_METHOD_GET_NOTES)
+            .put(&API_METHOD_SET_NOTES)
+    ),
     (
         "prune",
         &Router::new()
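The new notes endpoints read and write manifest["unprotected"]["notes"]. Keeping the notes in the manifest's unprotected object matters because that subtree is excluded from the manifest signature, so edits do not invalidate a signed manifest (the SignOnly handling earlier points the same way). A small serde_json sketch of the round trip:

    use serde_json::json;

    fn main() {
        // the "unprotected" subtree can be rewritten without re-signing
        let mut manifest = json!({
            "files": [],
            "unprotected": {}
        });
        manifest["unprotected"]["notes"] = "first line\nmore detail".into();

        let notes = manifest["unprotected"]["notes"].as_str().unwrap_or("");
        assert_eq!(notes.lines().next(), Some("first line"));
    }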
@@ -1,15 +1,15 @@
-use anyhow::{Error};
+use anyhow::{format_err, Error};
 use serde_json::Value;
-use std::collections::HashMap;

 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::{list_subdirs_api_method, sortable};

 use crate::api2::types::*;
-use crate::api2::pull::{get_pull_parameters};
+use crate::api2::pull::do_sync_job;
 use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
-use crate::server::{self, TaskListInfo, WorkerTask};
+use crate::server::UPID;
+use crate::config::jobstate::{Job, JobState};
 use crate::tools::systemd::time::{
     parse_calendar_event, compute_next_event};

@@ -33,33 +33,26 @@ pub fn list_sync_jobs(

     let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;

-    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
-    let tasks = server::read_task_list()?;
-
-    for info in tasks.iter() {
-        let worker_id = match &info.upid.worker_id {
-            Some(id) => id,
-            _ => { continue; },
-        };
-        if let Some(last) = last_tasks.get(worker_id) {
-            if last.upid.starttime < info.upid.starttime {
-                last_tasks.insert(worker_id.to_string(), &info);
-            }
-        } else {
-            last_tasks.insert(worker_id.to_string(), &info);
-        }
-    }
-
     for job in &mut list {
-        let mut last = 0;
-        if let Some(task) = last_tasks.get(&job.id) {
-            job.last_run_upid = Some(task.upid_str.clone());
-            if let Some((endtime, status)) = &task.state {
-                job.last_run_state = Some(String::from(status));
-                job.last_run_endtime = Some(*endtime);
-                last = *endtime;
-            }
-        }
+        let last_state = JobState::load("syncjob", &job.id)
+            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
+        let (upid, endtime, state, starttime) = match last_state {
+            JobState::Created { time } => (None, None, None, time),
+            JobState::Started { upid } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), None, None, parsed_upid.starttime)
+            },
+            JobState::Finished { upid, state } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
+            },
+        };
+
+        job.last_run_upid = upid;
+        job.last_run_state = state;
+        job.last_run_endtime = endtime;
+
+        let last = job.last_run_endtime.unwrap_or_else(|| starttime);

         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
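Instead of scanning the full task list to find each job's most recent run, every sync job now persists its own state, loaded directly by id. The match above distinguishes created, started and finished jobs; a reduced sketch of that state machine (the field layout is simplified here, in the diff the finished state carries its end time inside the task-state value):

    enum JobState {
        Created { time: i64 },
        Started { upid: String },
        Finished { upid: String, endtime: i64, state: String },
    }

    // Map a loaded state onto the optional fields the API reports.
    fn summarize(job: JobState) -> (Option<String>, Option<i64>, Option<String>) {
        match job {
            JobState::Created { .. } => (None, None, None),
            JobState::Started { upid } => (Some(upid), None, None),
            JobState::Finished { upid, endtime, state } => (Some(upid), Some(endtime), Some(state)),
        }
    }

    fn main() {
        let (upid, endtime, state) = summarize(JobState::Started { upid: "UPID:...".into() });
        assert_eq!(upid.as_deref(), Some("UPID:..."));
        assert!(endtime.is_none() && state.is_none());
    }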
@@ -83,7 +76,7 @@ pub fn list_sync_jobs(
     }
 )]
 /// Runs the sync jobs manually.
-async fn run_sync_job(
+fn run_sync_job(
     id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -92,21 +85,11 @@ async fn run_sync_job(
     let (config, _digest) = sync::config()?;
     let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    let delete = sync_job.remove_vanished.unwrap_or(true);
-    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+    let job = Job::new("syncjob", &id)?;

-    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
-
-        worker.log(format!("sync job '{}' start", &id));
-
-        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
-
-        worker.log(format!("sync job '{}' end", &id));
-
-        Ok(())
-    })?;
+    let upid_str = do_sync_job(job, sync_job, &userid, None)?;

     Ok(upid_str)
 }
@@ -16,6 +16,7 @@ use crate::backup::*;
 use crate::api2::types::*;
 use crate::config::acl::PRIV_DATASTORE_BACKUP;
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::tools::fs::lock_dir_noblock;

 mod environment;
 use environment::*;
@@ -56,12 +57,12 @@ fn upgrade_to_backup_protocol(
 async move {
     let debug = param["debug"].as_bool().unwrap_or(false);

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

     let store = tools::required_string_param(&param, "store")?.to_owned();

     let user_info = CachedUserInfo::new()?;
-    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -88,30 +89,36 @@ async move {
     let env_type = rpcenv.env_type();

     let backup_group = BackupGroup::new(backup_type, backup_id);
-    let owner = datastore.create_backup_group(&backup_group, &username)?;
+
+    // lock backup group to only allow one backup per group at a time
+    let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
+
     // permission check
-    if owner != username { // only the owner is allowed to create additional snapshots
-        bail!("backup owner check failed ({} != {})", username, owner);
+    if owner != userid { // only the owner is allowed to create additional snapshots
+        bail!("backup owner check failed ({} != {})", userid, owner);
     }

-    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
-    let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
+    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
+    let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);

-    if let Some(last) = &last_backup {
+    let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {
             bail!("backup timestamp is older than last backup.");
         }
-        // fixme: abort if last backup is still running - howto test?
-        // Idea: write upid into a file inside snapshot dir. then test if
-        // it is still running here.
-    }

-    let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
+        // lock last snapshot to prevent forgetting/pruning it during backup
+        let full_path = datastore.snapshot_path(&last.backup_dir);
+        Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
+    } else {
+        None
+    };
+
+    let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
     if !is_new { bail!("backup directory already exists."); }

-    WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
+    WorkerTask::spawn("backup", Some(worker_id), userid.clone(), true, move |worker| {
         let mut env = BackupEnvironment::new(
-            env_type, username.clone(), worker.clone(), datastore, backup_dir);
+            env_type, userid, worker.clone(), datastore, backup_dir);

         env.debug = debug;
         env.last_backup = last_backup;
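create_locked_backup_group and lock_dir_noblock serialize writers by flocking the directories themselves, and the returned guards keep the locks alive for as long as they stay in scope. The helper is presumably a thin wrapper around a non-blocking flock(2); a hedged sketch of what such a wrapper can look like, using the nix crate's flock binding (this is an assumption, not the project's implementation):

    use std::fs::File;
    use std::os::unix::io::AsRawFd;
    use std::path::Path;

    use nix::fcntl::{flock, FlockArg};

    // Non-blocking exclusive lock on a directory; the returned File is
    // the guard - dropping it closes the fd and releases the lock.
    fn lock_dir_noblock(path: &Path, what: &str, would_block_msg: &str) -> Result<File, String> {
        let dir = File::open(path)
            .map_err(|err| format!("unable to open {} '{}' - {}", what, path.display(), err))?;
        flock(dir.as_raw_fd(), FlockArg::LockExclusiveNonblock)
            .map_err(|_| format!("unable to lock {} '{}' - {}", what, path.display(), would_block_msg))?;
        Ok(dir)
    }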
@@ -144,6 +151,11 @@ async move {
         .map(|_| Err(format_err!("task aborted")));

     async move {
+        // keep flock until task ends
+        let _group_guard = _group_guard;
+        let _snap_guard = _snap_guard;
+        let _last_guard = _last_guard;
+
         let res = select!{
             req = req_fut => req,
             abrt = abort_future => abrt,
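The let _group_guard = _group_guard; lines look like no-ops, but they force the captured guards to be moved into the inner async block, so the flocks are released only when the task future itself is dropped rather than when the surrounding function returns. A stand-alone illustration of the same move:

    struct Guard(&'static str);

    impl Drop for Guard {
        fn drop(&mut self) {
            println!("released lock on {}", self.0);
        }
    }

    fn spawn_task(group_guard: Guard) -> impl std::future::Future<Output = ()> {
        async move {
            // keep the flock until this future completes: the rebinding
            // moves the guard into the future's state instead of
            // dropping it when spawn_task returns
            let _group_guard = group_guard;
            // ... actual request handling would run here ...
        } // guard dropped when the future is finished or dropped
    }

    fn main() {
        let fut = spawn_task(Guard("datastore/vm/100"));
        // the guard now lives inside `fut`; dropping the future releases it
        drop(fut);
    }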
@@ -1,18 +1,21 @@
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use std::sync::{Arc, Mutex};
 use std::collections::HashMap;

+use ::serde::{Serialize};
 use serde_json::{json, Value};

 use proxmox::tools::digest_to_hex;
 use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::server::WorkerTask;
+use crate::api2::types::Userid;
 use crate::backup::*;
+use crate::server::WorkerTask;
 use crate::server::formatter::*;
 use hyper::{Body, Response};

+#[derive(Copy, Clone, Serialize)]
 struct UploadStatistic {
     count: u64,
     size: u64,
@@ -31,6 +34,19 @@ impl UploadStatistic {
     }
 }

+impl std::ops::Add for UploadStatistic {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self {
+        Self {
+            count: self.count + other.count,
+            size: self.size + other.size,
+            compressed_size: self.compressed_size + other.compressed_size,
+            duplicates: self.duplicates + other.duplicates,
+        }
+    }
+}
+
 struct DynamicWriterState {
     name: String,
     index: DynamicIndexWriter,
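Deriving Copy and implementing Add lets the per-file upload statistics be folded into the shared totals with a plain +, as state.backup_stat = state.backup_stat + data.upload_stat; does further down. A minimal self-contained version of the same pattern, reduced to two fields:

    #[derive(Copy, Clone, Default, Debug, PartialEq)]
    struct Stat { count: u64, size: u64 }

    impl std::ops::Add for Stat {
        type Output = Self;
        fn add(self, other: Self) -> Self {
            Stat { count: self.count + other.count, size: self.size + other.size }
        }
    }

    fn main() {
        let uploads = [Stat { count: 1, size: 10 }, Stat { count: 2, size: 20 }];
        // totals accumulate by value because Stat is Copy
        let total = uploads.iter().fold(Stat::default(), |acc, s| acc + *s);
        assert_eq!(total, Stat { count: 3, size: 30 });
    }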
@ -57,6 +73,8 @@ struct SharedBackupState {
|
|||||||
dynamic_writers: HashMap<usize, DynamicWriterState>,
|
dynamic_writers: HashMap<usize, DynamicWriterState>,
|
||||||
fixed_writers: HashMap<usize, FixedWriterState>,
|
fixed_writers: HashMap<usize, FixedWriterState>,
|
||||||
known_chunks: HashMap<[u8;32], u32>,
|
known_chunks: HashMap<[u8;32], u32>,
|
||||||
|
backup_size: u64, // sums up size of all files
|
||||||
|
backup_stat: UploadStatistic,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SharedBackupState {
|
impl SharedBackupState {
|
||||||
@ -82,7 +100,7 @@ impl SharedBackupState {
|
|||||||
pub struct BackupEnvironment {
|
pub struct BackupEnvironment {
|
||||||
env_type: RpcEnvironmentType,
|
env_type: RpcEnvironmentType,
|
||||||
result_attributes: Value,
|
result_attributes: Value,
|
||||||
user: String,
|
user: Userid,
|
||||||
pub debug: bool,
|
pub debug: bool,
|
||||||
pub formatter: &'static OutputFormatter,
|
pub formatter: &'static OutputFormatter,
|
||||||
pub worker: Arc<WorkerTask>,
|
pub worker: Arc<WorkerTask>,
|
||||||
@ -95,7 +113,7 @@ pub struct BackupEnvironment {
|
|||||||
impl BackupEnvironment {
|
impl BackupEnvironment {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
env_type: RpcEnvironmentType,
|
env_type: RpcEnvironmentType,
|
||||||
user: String,
|
user: Userid,
|
||||||
worker: Arc<WorkerTask>,
|
worker: Arc<WorkerTask>,
|
||||||
datastore: Arc<DataStore>,
|
datastore: Arc<DataStore>,
|
||||||
backup_dir: BackupDir,
|
backup_dir: BackupDir,
|
||||||
@ -108,6 +126,8 @@ impl BackupEnvironment {
|
|||||||
dynamic_writers: HashMap::new(),
|
dynamic_writers: HashMap::new(),
|
||||||
fixed_writers: HashMap::new(),
|
fixed_writers: HashMap::new(),
|
||||||
known_chunks: HashMap::new(),
|
known_chunks: HashMap::new(),
|
||||||
|
backup_size: 0,
|
||||||
|
backup_stat: UploadStatistic::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
@ -353,7 +373,6 @@ impl BackupEnvironment {
|
|||||||
|
|
||||||
let expected_csum = data.index.close()?;
|
let expected_csum = data.index.close()?;
|
||||||
|
|
||||||
println!("server checksum {:?} client: {:?}", expected_csum, csum);
|
|
||||||
if csum != expected_csum {
|
if csum != expected_csum {
|
||||||
bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
|
bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
|
||||||
}
|
}
|
||||||
@ -361,6 +380,8 @@ impl BackupEnvironment {
|
|||||||
self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
|
self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
|
||||||
|
|
||||||
state.file_counter += 1;
|
state.file_counter += 1;
|
||||||
|
state.backup_size += size;
|
||||||
|
state.backup_stat = state.backup_stat + data.upload_stat;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -395,7 +416,6 @@ impl BackupEnvironment {
|
|||||||
let uuid = data.index.uuid;
|
let uuid = data.index.uuid;
|
||||||
let expected_csum = data.index.close()?;
|
let expected_csum = data.index.close()?;
|
||||||
|
|
||||||
println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
|
|
||||||
if csum != expected_csum {
|
if csum != expected_csum {
|
||||||
bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
|
bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
|
||||||
}
|
}
|
||||||
@ -403,6 +423,8 @@ impl BackupEnvironment {
|
|||||||
self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
|
self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
|
||||||
|
|
||||||
state.file_counter += 1;
|
state.file_counter += 1;
|
||||||
|
state.backup_size += size;
|
||||||
|
state.backup_stat = state.backup_stat + data.upload_stat;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -416,9 +438,8 @@ impl BackupEnvironment {
|
|||||||
let blob_len = data.len();
|
let blob_len = data.len();
|
||||||
let orig_len = data.len(); // fixme:
|
let orig_len = data.len(); // fixme:
|
||||||
|
|
||||||
let blob = DataBlob::from_raw(data)?;
|
// always verify blob/CRC at server side
|
||||||
// always verify CRC at server side
|
let blob = DataBlob::load_from_reader(&mut &data[..])?;
|
||||||
blob.verify_crc()?;
|
|
||||||
|
|
||||||
let raw_data = blob.raw_data();
|
let raw_data = blob.raw_data();
|
||||||
replace_file(&path, raw_data, CreateOptions::new())?;
|
replace_file(&path, raw_data, CreateOptions::new())?;
|
||||||
@ -427,6 +448,8 @@ impl BackupEnvironment {
|
|||||||
|
|
||||||
let mut state = self.state.lock().unwrap();
|
let mut state = self.state.lock().unwrap();
|
||||||
state.file_counter += 1;
|
state.file_counter += 1;
|
||||||
|
state.backup_size += orig_len as u64;
|
||||||
|
state.backup_stat.size += blob_len as u64;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -446,6 +469,28 @@ impl BackupEnvironment {
             bail!("backup does not contain valid files (file count == 0)");
         }
 
+        // check manifest
+        let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
+            .map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
+
+        let stats = serde_json::to_value(state.backup_stat)?;
+
+        manifest["unprotected"]["chunk_upload_stats"] = stats;
+
+        self.datastore.store_manifest(&self.backup_dir, manifest)
+            .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
+
+        if let Some(base) = &self.last_backup {
+            let path = self.datastore.snapshot_path(&base.backup_dir);
+            if !path.exists() {
+                bail!(
+                    "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
+                    base.backup_dir
+                );
+            }
+        }
+
+        // marks the backup as successful
         state.finished = true;
 
         Ok(())
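
A note on the hunk above: the upload statistics land in the manifest's
"unprotected" object, which holds data the server may rewrite after the client
has finalized the manifest. A minimal sketch of a consumer (function name
hypothetical, only the JSON shape above is assumed):

    use serde_json::Value;

    /// Read back the chunk upload stats stored by the finish step above.
    fn read_upload_stats(manifest: &Value) -> Option<&Value> {
        manifest.get("unprotected")?.get("chunk_upload_stats")
    }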
@@ -480,7 +525,7 @@ impl BackupEnvironment {
         let mut state = self.state.lock().unwrap();
         state.finished = true;
 
-        self.datastore.remove_backup_dir(&self.backup_dir)?;
+        self.datastore.remove_backup_dir(&self.backup_dir, true)?;
 
         Ok(())
     }
@@ -505,7 +550,7 @@ impl RpcEnvironment for BackupEnvironment {
     }
 
     fn get_user(&self) -> Option<String> {
-        Some(self.user.clone())
+        Some(self.user.to_string())
     }
 }
 
@@ -243,7 +243,7 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
     &sorted!([
         ("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
         ("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
-            .minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
+            .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
            .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
            .schema()
        )
@@ -5,6 +5,7 @@ use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;
 
 use crate::api2::types::*;
 use crate::backup::*;
@@ -99,7 +100,7 @@ pub fn list_datastores(
 /// Create new datastore config.
 pub fn create_datastore(param: Value) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
 
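
The config handlers in this file share one locking pattern: take the lockfile
guard first, then read, compare digest, modify and save while the guard is
alive. A minimal sketch of that pattern, assuming only the open_file_locked
signature visible in this diff (the lockfile path is illustrative):

    use std::time::Duration;
    use anyhow::Error;
    use proxmox::tools::fs::open_file_locked;

    fn locked_config_update() -> Result<(), Error> {
        // serialize concurrent API calls on the same config file
        let _lock = open_file_locked("/etc/proxmox-backup/.datastore.lck", Duration::new(10, 0))?;
        // ... load config, verify digest, mutate, save ...
        Ok(())
    } // guard dropped here, lock released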
@@ -253,7 +254,7 @@ pub fn update_datastore(
     digest: Option<String>,
 ) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     // pass/compare digest
     let (mut config, expected_digest) = datastore::config()?;
@@ -327,7 +328,7 @@ pub fn update_datastore(
 /// Remove a datastore configuration.
 pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = datastore::config()?;
 
@@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
 use base64;
 
 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;
 
 use crate::api2::types::*;
 use crate::config::remote;
@@ -60,7 +61,7 @@ pub fn list_remotes(
                 schema: DNS_NAME_OR_IP_SCHEMA,
             },
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 schema: remote::REMOTE_PASSWORD_SCHEMA,
@@ -78,7 +79,7 @@ pub fn list_remotes(
 /// Create new remote.
 pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let mut data = param.clone();
     data["password"] = Value::from(base64::encode(password.as_bytes()));
@@ -154,7 +155,7 @@ pub enum DeletableProperty {
             },
             userid: {
                 optional: true,
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 optional: true,
@@ -187,14 +188,14 @@ pub fn update_remote(
     name: String,
     comment: Option<String>,
     host: Option<String>,
-    userid: Option<String>,
+    userid: Option<Userid>,
     password: Option<String>,
     fingerprint: Option<String>,
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = remote::config()?;
 
@@ -255,7 +256,7 @@ pub fn update_remote(
 /// Remove a remote from the configuration file.
 pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = remote::config()?;
 
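
Replacing `schema: PROXMOX_USER_ID_SCHEMA` with `type: Userid` switches the
parameter from a pattern-checked String to a typed value, so handlers like
update_remote() above receive an Option<Userid> directly. A minimal sketch of
the manual equivalent used elsewhere in this diff (relies on Userid
implementing FromStr; the helper name is hypothetical):

    use anyhow::Error;
    use crate::api2::types::Userid;

    fn parse_userid(raw: &str) -> Result<Userid, Error> {
        // the same conversion the #[api] macro now performs for `type: Userid`
        let userid: Userid = raw.parse()?;
        Ok(userid)
    }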
@@ -3,6 +3,7 @@ use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, Router, RpcEnvironment};
+use proxmox::tools::fs::open_file_locked;
 
 use crate::api2::types::*;
 use crate::config::sync::{self, SyncJobConfig};
@@ -68,7 +69,7 @@ pub fn list_sync_jobs(
 /// Create a new sync job.
 pub fn create_sync_job(param: Value) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
 
@@ -82,6 +83,8 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {
 
     sync::save_config(&config)?;
 
+    crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
+
     Ok(())
 }
 
@@ -184,7 +187,7 @@ pub fn update_sync_job(
     digest: Option<String>,
 ) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     // pass/compare digest
     let (mut config, expected_digest) = sync::config()?;
@@ -247,7 +250,7 @@ pub fn update_sync_job(
 /// Remove a sync job configuration
 pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = sync::config()?;
 
@@ -263,6 +266,8 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>
 
     sync::save_config(&config)?;
 
+    crate::config::jobstate::remove_state_file("syncjob", &id)?;
+
     Ok(())
 }
 
@@ -1,13 +1,19 @@
 use std::path::PathBuf;
 
 use anyhow::Error;
-use futures::*;
+use futures::stream::TryStreamExt;
 use hyper::{Body, Response, StatusCode, header};
-use proxmox::http_err;
+use proxmox::http_bail;
 
 pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
-    let file = tokio::fs::File::open(path.clone())
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
-        .await?;
+    let file = match tokio::fs::File::open(path.clone()).await {
+        Ok(file) => file,
+        Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
+            http_bail!(NOT_FOUND, "open file {:?} failed - not found", path);
+        }
+        Err(err) => http_bail!(BAD_REQUEST, "open file {:?} failed: {}", path, err),
+    };
 
     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
         .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
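
Two details in the hunk above are easy to miss: the glob import `futures::*`
is narrowed to `futures::stream::TryStreamExt`, the trait that provides
map_ok() on the framed read, and a missing file now maps to NOT_FOUND instead
of BAD_REQUEST. A minimal sketch of the stream-to-body adaptation, assuming
hyper's Body::wrap_stream is available (function name hypothetical):

    use futures::stream::TryStreamExt; // provides map_ok()

    fn file_body(file: tokio::fs::File) -> hyper::Body {
        let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        hyper::Body::wrap_stream(stream)
    }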

src/api2/node.rs (312 changed lines)
@@ -1,18 +1,308 @@
-use proxmox::api::router::{Router, SubdirMap};
-use proxmox::list_subdirs_api_method;
+use std::net::TcpListener;
+use std::os::unix::io::AsRawFd;
 
-pub mod tasks;
-mod time;
-pub mod network;
+use anyhow::{bail, format_err, Error};
+use futures::future::{FutureExt, TryFutureExt};
+use hyper::body::Body;
+use hyper::http::request::Parts;
+use hyper::upgrade::Upgraded;
+use nix::fcntl::{fcntl, FcntlArg, FdFlag};
+use serde_json::{json, Value};
+use tokio::io::{AsyncBufReadExt, BufReader};
+
+use proxmox::api::router::{Router, SubdirMap};
+use proxmox::api::{
+    api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
+};
+use proxmox::list_subdirs_api_method;
+use proxmox::tools::websocket::WebSocket;
+use proxmox::{identity, sortable};
+
+use crate::api2::types::*;
+use crate::config::acl::PRIV_SYS_CONSOLE;
+use crate::server::WorkerTask;
+use crate::tools;
+use crate::tools::ticket::{self, Empty, Ticket};
+
+pub mod disks;
 pub mod dns;
-mod syslog;
+pub mod network;
+pub mod tasks;
+
+pub(crate) mod rrd;
+
+mod apt;
 mod journal;
 mod services;
 mod status;
-pub(crate) mod rrd;
-pub mod disks;
+mod subscription;
+mod syslog;
+mod time;
+
+pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
+    .format(&ApiStringFormat::Enum(&[
+        EnumEntry::new("login", "Login"),
+        EnumEntry::new("upgrade", "Upgrade"),
+    ]))
+    .schema();
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            cmd: {
+                schema: SHELL_CMD_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        type: Object,
+        description: "Object with the user, ticket, port and upid",
+        properties: {
+            user: {
+                description: "",
+                type: String,
+            },
+            ticket: {
+                description: "",
+                type: String,
+            },
+            port: {
+                description: "",
+                type: String,
+            },
+            upid: {
+                description: "",
+                type: String,
+            },
+        }
+    },
+    access: {
+        description: "Restricted to users on realm 'pam'",
+        permission: &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
+    }
+)]
+/// Call termproxy and return shell ticket
+async fn termproxy(
+    cmd: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let userid: Userid = rpcenv
+        .get_user()
+        .ok_or_else(|| format_err!("unknown user"))?
+        .parse()?;
+
+    if userid.realm() != "pam" {
+        bail!("only pam users can use the console");
+    }
+
+    let path = "/system";
+
+    // use port 0 and let the kernel decide which port is free
+    let listener = TcpListener::bind("localhost:0")?;
+    let port = listener.local_addr()?.port();
+
+    let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?
+        .sign(
+            crate::auth_helpers::private_auth_key(),
+            Some(&ticket::term_aad(&userid, &path, port)),
+        )?;
+
+    let mut command = Vec::new();
+    match cmd.as_ref().map(|x| x.as_str()) {
+        Some("login") | None => {
+            command.push("login");
+            if userid == "root@pam" {
+                command.push("-f");
+                command.push("root");
+            }
+        }
+        Some("upgrade") => {
+            if userid != "root@pam" {
+                bail!("only root@pam can upgrade");
+            }
+            // TODO: add nicer/safer wrapper like in PVE instead
+            command.push("sh");
+            command.push("-c");
+            command.push("apt full-upgrade; bash -l");
+        }
+        _ => bail!("invalid command"),
+    };
+
+    let username = userid.name().to_owned();
+    let upid = WorkerTask::spawn(
+        "termproxy",
+        None,
+        userid,
+        false,
+        move |worker| async move {
+            // move inside the worker so that it survives and does not close the port
+            // remove CLOEXEC from listener so that we can reuse it in termproxy
+            let fd = listener.as_raw_fd();
+            let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
+                Ok(bits) => FdFlag::from_bits_truncate(bits),
+                Err(err) => bail!("could not get fd: {}", err),
+            };
+            flags.remove(FdFlag::FD_CLOEXEC);
+            if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
+                bail!("could not set fd: {}", err);
+            }
+
+            let mut arguments: Vec<&str> = Vec::new();
+            let fd_string = fd.to_string();
+            arguments.push(&fd_string);
+            arguments.extend_from_slice(&[
+                "--path",
+                &path,
+                "--perm",
+                "Sys.Console",
+                "--authport",
+                "82",
+                "--port-as-fd",
+                "--",
+            ]);
+            arguments.extend_from_slice(&command);
+
+            let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");
+
+            cmd.args(&arguments)
+                .stdout(std::process::Stdio::piped())
+                .stderr(std::process::Stdio::piped());
+
+            let mut child = cmd.spawn().expect("error executing termproxy");
+
+            let stdout = child.stdout.take().expect("no child stdout handle");
+            let stderr = child.stderr.take().expect("no child stderr handle");
+
+            let worker_stdout = worker.clone();
+            let stdout_fut = async move {
+                let mut reader = BufReader::new(stdout).lines();
+                while let Some(line) = reader.next_line().await? {
+                    worker_stdout.log(line);
+                }
+                Ok::<(), Error>(())
+            };
+
+            let worker_stderr = worker.clone();
+            let stderr_fut = async move {
+                let mut reader = BufReader::new(stderr).lines();
+                while let Some(line) = reader.next_line().await? {
+                    worker_stderr.warn(line);
+                }
+                Ok::<(), Error>(())
+            };
+
+            let mut needs_kill = false;
+            let res = tokio::select!{
+                res = &mut child => {
+                    let exit_code = res?;
+                    if !exit_code.success() {
+                        match exit_code.code() {
+                            Some(code) => bail!("termproxy exited with {}", code),
+                            None => bail!("termproxy exited by signal"),
+                        }
+                    }
+                    Ok(())
+                },
+                res = stdout_fut => res,
+                res = stderr_fut => res,
+                res = worker.abort_future() => {
+                    needs_kill = true;
+                    res.map_err(Error::from)
+                }
+            };
+
+            if needs_kill {
+                if res.is_ok() {
+                    child.kill()?;
+                    child.await?;
+                    return Ok(());
+                }
+
+                if let Err(err) = child.kill() {
+                    worker.warn(format!("error killing termproxy: {}", err));
+                } else if let Err(err) = child.await {
+                    worker.warn(format!("error awaiting termproxy: {}", err));
+                }
+            }
+
+            res
+        },
+    )?;
+
+    // FIXME: We're returning the user NAME only?
+    Ok(json!({
+        "user": username,
+        "ticket": ticket,
+        "port": port,
+        "upid": upid,
+    }))
+}
+
+#[sortable]
+pub const API_METHOD_WEBSOCKET: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&upgrade_to_websocket),
+    &ObjectSchema::new(
+        "Upgraded to websocket",
+        &sorted!([
+            ("node", false, &NODE_SCHEMA),
+            (
+                "vncticket",
+                false,
+                &StringSchema::new("Terminal ticket").schema()
+            ),
+            ("port", false, &IntegerSchema::new("Terminal port").schema()),
+        ]),
+    ),
+)
+.access(
+    Some("The user needs Sys.Console on /system."),
+    &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
+);
+
+fn upgrade_to_websocket(
+    parts: Parts,
+    req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+    async move {
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+        let ticket = tools::required_string_param(&param, "vncticket")?;
+        let port: u16 = tools::required_integer_param(&param, "port")? as u16;
+
+        // will be checked again by termproxy
+        Ticket::<Empty>::parse(ticket)?
+            .verify(
+                crate::auth_helpers::public_auth_key(),
+                ticket::TERM_PREFIX,
+                Some(&ticket::term_aad(&userid, "/system", port)),
+            )?;
+
+        let (ws, response) = WebSocket::new(parts.headers)?;
+
+        crate::server::spawn_internal_task(async move {
+            let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
+                Ok(upgraded) => upgraded,
+                _ => bail!("error"),
+            };
+
+            let local = tokio::net::TcpStream::connect(format!("localhost:{}", port)).await?;
+            ws.serve_connection(conn, local).await
+        });
+
+        Ok(response)
+    }
+    .boxed()
+}
 
 pub const SUBDIRS: SubdirMap = &[
+    ("apt", &apt::ROUTER),
     ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
@@ -20,9 +310,15 @@ pub const SUBDIRS: SubdirMap = &[
     ("rrd", &rrd::ROUTER),
     ("services", &services::ROUTER),
     ("status", &status::ROUTER),
+    ("subscription", &subscription::ROUTER),
     ("syslog", &syslog::ROUTER),
     ("tasks", &tasks::ROUTER),
+    ("termproxy", &Router::new().post(&API_METHOD_TERMPROXY)),
     ("time", &time::ROUTER),
+    (
+        "vncwebsocket",
+        &Router::new().upgrade(&API_METHOD_WEBSOCKET),
+    ),
 ];
 
 pub const ROUTER: Router = Router::new()
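
The trickiest part of termproxy() above is the file descriptor handover: the
TcpListener is bound in the API process, but the terminal multiplexer runs as
a child process, so the listener fd must survive exec. A minimal standalone
sketch of that step, using only the nix calls visible above (helper name
hypothetical):

    use std::os::unix::io::RawFd;
    use anyhow::{bail, Error};
    use nix::fcntl::{fcntl, FcntlArg, FdFlag};

    /// Clear FD_CLOEXEC so a spawned child (here: /usr/bin/termproxy, which
    /// receives the fd number via --port-as-fd) inherits the socket.
    fn clear_cloexec(fd: RawFd) -> Result<(), Error> {
        let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
            Ok(bits) => FdFlag::from_bits_truncate(bits),
            Err(err) => bail!("could not get fd flags: {}", err),
        };
        flags.remove(FdFlag::FD_CLOEXEC);
        if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
            bail!("could not set fd flags: {}", err);
        }
        Ok(())
    }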

src/api2/node/apt.rs (new file, 268 lines)

@@ -0,0 +1,268 @@
+use apt_pkg_native::Cache;
+use anyhow::{Error, bail};
+use serde_json::{json, Value};
+
+use proxmox::{list_subdirs_api_method, const_regex};
+use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox::api::router::{Router, SubdirMap};
+
+use crate::server::WorkerTask;
+
+use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
+
+const_regex! {
+    VERSION_EPOCH_REGEX = r"^\d+:";
+    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
+}
+
+// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
+// not possible as our packages do not have a URI set in their Release file
+fn get_changelog_url(
+    package: &str,
+    filename: &str,
+    source_pkg: &str,
+    version: &str,
+    source_version: &str,
+    origin: &str,
+    component: &str,
+) -> Result<String, Error> {
+    if origin == "" {
+        bail!("no origin available for package {}", package);
+    }
+
+    if origin == "Debian" {
+        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");
+
+        let prefix = if source_pkg.starts_with("lib") {
+            source_pkg.get(0..4)
+        } else {
+            source_pkg.get(0..1)
+        };
+
+        let prefix = match prefix {
+            Some(p) => p,
+            None => bail!("cannot get starting characters of package name '{}'", package)
+        };
+
+        // note: security updates seem to not always upload a changelog for
+        // their package version, so this only works *most* of the time
+        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
+                          prefix, source_pkg, source_pkg, source_version));
+
+    } else if origin == "Proxmox" {
+        let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");
+
+        let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
+            Some(captures) => {
+                let base_capture = captures.get(1);
+                match base_capture {
+                    Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
+                    None => bail!("incompatible filename, cannot find regex group")
+                }
+            },
+            None => bail!("incompatible filename, doesn't match regex")
+        };
+
+        return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
+                          base, package, version));
+    }
+
+    bail!("unknown origin ({}) or component ({})", origin, component)
+}
+
+fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
+    -> Vec<APTUpdateInfo> {
+
+    let mut ret = Vec::new();
+
+    // note: this is not an 'apt update', it just re-reads the cache from disk
+    let mut cache = Cache::get_singleton();
+    cache.reload();
+
+    let mut cache_iter = cache.iter();
+
+    loop {
+        let view = match cache_iter.next() {
+            Some(view) => view,
+            None => break
+        };
+
+        let current_version = match view.current_version() {
+            Some(vers) => vers,
+            None => continue
+        };
+        let candidate_version = match view.candidate_version() {
+            Some(vers) => vers,
+            // if there's no candidate (i.e. no update) get info of currently
+            // installed version instead
+            None => current_version.clone()
+        };
+
+        let package = view.name();
+        if filter(&package, &current_version, &candidate_version) {
+            let mut origin_res = "unknown".to_owned();
+            let mut section_res = "unknown".to_owned();
+            let mut priority_res = "unknown".to_owned();
+            let mut change_log_url = "".to_owned();
+            let mut short_desc = package.clone();
+            let mut long_desc = "".to_owned();
+
+            // get additional information via nested APT 'iterators'
+            let mut view_iter = view.versions();
+            while let Some(ver) = view_iter.next() {
+                if ver.version() == candidate_version {
+                    if let Some(section) = ver.section() {
+                        section_res = section;
+                    }
+
+                    if let Some(prio) = ver.priority_type() {
+                        priority_res = prio;
+                    }
+
+                    // assume every package has only one origin file (not
+                    // origin, but origin *file*, for some reason those seem to
+                    // be different concepts in APT)
+                    let mut origin_iter = ver.origin_iter();
+                    let origin = origin_iter.next();
+                    if let Some(origin) = origin {
+
+                        if let Some(sd) = origin.short_desc() {
+                            short_desc = sd;
+                        }
+
+                        if let Some(ld) = origin.long_desc() {
+                            long_desc = ld;
+                        }
+
+                        // the package files appear in priority order, meaning
+                        // the one for the candidate version is first
+                        let mut pkg_iter = origin.file();
+                        let pkg_file = pkg_iter.next();
+                        if let Some(pkg_file) = pkg_file {
+                            if let Some(origin_name) = pkg_file.origin() {
+                                origin_res = origin_name;
+                            }
+
+                            let filename = pkg_file.file_name();
+                            let source_pkg = ver.source_package();
+                            let source_ver = ver.source_version();
+                            let component = pkg_file.component();
+
+                            // build changelog URL from gathered information
+                            // ignore errors, use empty changelog instead
+                            let url = get_changelog_url(&package, &filename, &source_pkg,
+                                &candidate_version, &source_ver, &origin_res, &component);
+                            if let Ok(url) = url {
+                                change_log_url = url;
+                            }
+                        }
+                    }
+
+                    break;
+                }
+            }
+
+            let info = APTUpdateInfo {
+                package,
+                title: short_desc,
+                arch: view.arch(),
+                description: long_desc,
+                change_log_url,
+                origin: origin_res,
+                version: candidate_version,
+                old_version: current_version,
+                priority: priority_res,
+                section: section_res,
+            };
+            ret.push(info);
+        }
+    }
+
+    return ret;
+}
+
+#[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        description: "A list of packages with available updates.",
+        type: Array,
+        items: { type: APTUpdateInfo },
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// List available APT updates
+fn apt_update_available(_param: Value) -> Result<Value, Error> {
+    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
+    Ok(json!(ret))
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+            quiet: {
+                description: "Only produces output suitable for logging, omitting progress indicators.",
+                type: bool,
+                default: false,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Update the APT database
+pub fn apt_update_database(
+    quiet: Option<bool>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
+
+    let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
+        if !quiet { worker.log("starting apt-get update") }
+
+        // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
+
+        let mut command = std::process::Command::new("apt-get");
+        command.arg("update");
+
+        let output = crate::tools::run_command(command, None)?;
+        if !quiet { worker.log(output) }
+
+        // TODO: add mail notify for new updates like PVE
+
+        Ok(())
+    })?;
+
+    Ok(upid_str)
+}
+
+const SUBDIRS: SubdirMap = &[
+    ("update", &Router::new()
+        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
+        .post(&API_METHOD_APT_UPDATE_DATABASE)
+    ),
+];
+
+pub const ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SUBDIRS))
+    .subdirs(SUBDIRS);
|
|||||||
};
|
};
|
||||||
use crate::server::WorkerTask;
|
use crate::server::WorkerTask;
|
||||||
|
|
||||||
use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
|
use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
|
||||||
|
|
||||||
pub mod directory;
|
pub mod directory;
|
||||||
pub mod zfs;
|
pub mod zfs;
|
||||||
@@ -26,10 +26,10 @@ pub mod zfs;
                 schema: NODE_SCHEMA,
             },
             skipsmart: {
                 description: "Skip smart checks.",
                 type: bool,
                 optional: true,
                 default: false,
             },
             "usage-type": {
                 type: DiskUsageType,
@@ -140,7 +140,7 @@ pub fn initialize_disk(
 
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
 
     let info = get_disk_usage_info(&disk, true)?;
 
@@ -149,7 +149,7 @@ pub fn initialize_disk(
     }
 
     let upid_str = WorkerTask::new_thread(
-        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
+        "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
     {
         worker.log(format!("initialize disk {}", disk));
 
@@ -16,6 +16,7 @@ use crate::tools::systemd::{self, types::*};
 use crate::server::WorkerTask;
 
 use crate::api2::types::*;
+use crate::config::datastore::DataStoreConfig;
 
 #[api(
     properties: {
@@ -133,7 +134,7 @@ pub fn create_datastore_disk(
 
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
 
     let info = get_disk_usage_info(&disk, true)?;
 
@@ -142,7 +143,7 @@ pub fn create_datastore_disk(
     }
 
     let upid_str = WorkerTask::new_thread(
-        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
     {
         worker.log(format!("create datastore '{}' on disk {}", name, disk));
 
|
|||||||
Ok(upid_str)
|
Ok(upid_str)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
node: {
|
||||||
|
schema: NODE_SCHEMA,
|
||||||
|
},
|
||||||
|
name: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.".
|
||||||
|
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let path = format!("/mnt/datastore/{}", name);
|
||||||
|
// path of datastore cannot be changed
|
||||||
|
let (config, _) = crate::config::datastore::config()?;
|
||||||
|
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
||||||
|
let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
|
||||||
|
.filter(|ds| ds.path == path)
|
||||||
|
.next();
|
||||||
|
|
||||||
|
if let Some(conflicting_datastore) = conflicting_datastore {
|
||||||
|
bail!("Can't remove '{}' since it's required by datastore '{}'",
|
||||||
|
conflicting_datastore.path, conflicting_datastore.name);
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable systemd mount-unit
|
||||||
|
let mut mount_unit_name = systemd::escape_unit(&path, true);
|
||||||
|
mount_unit_name.push_str(".mount");
|
||||||
|
systemd::disable_unit(&mount_unit_name)?;
|
||||||
|
|
||||||
|
// delete .mount-file
|
||||||
|
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
|
||||||
|
let full_path = std::path::Path::new(&mount_unit_path);
|
||||||
|
log::info!("removing systemd mount unit {:?}", full_path);
|
||||||
|
std::fs::remove_file(&full_path)?;
|
||||||
|
|
||||||
|
// try to unmount, if that fails tell the user to reboot or unmount manually
|
||||||
|
let mut command = std::process::Command::new("umount");
|
||||||
|
command.arg(&path);
|
||||||
|
match crate::tools::run_command(command, None) {
|
||||||
|
Err(_) => bail!(
|
||||||
|
"Could not umount '{}' since it is busy. It will stay mounted \
|
||||||
|
until the next reboot or until unmounted manually!",
|
||||||
|
path
|
||||||
|
),
|
||||||
|
Ok(_) => Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.delete(&API_METHOD_DELETE_DATASTORE_DISK);
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
.get(&API_METHOD_LIST_DATASTORE_MOUNTS)
|
.get(&API_METHOD_LIST_DATASTORE_MOUNTS)
|
||||||
.post(&API_METHOD_CREATE_DATASTORE_DISK);
|
.post(&API_METHOD_CREATE_DATASTORE_DISK)
|
||||||
|
.match_all("name", &ITEM_ROUTER);
|
||||||
|
|
||||||
|
|
||||||
fn create_datastore_mount_unit(
|
fn create_datastore_mount_unit(
|
||||||
|
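
For orientation, the unit-name handling above follows systemd's path escaping
rules (a hedged example, assuming systemd::escape_unit mirrors
systemd-escape(1) in path mode):

    // path:       "/mnt/datastore/store1"
    // unit name:  "mnt-datastore-store1.mount"
    // unit file:  "/etc/systemd/system/mnt-datastore-store1.mount"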
@@ -41,6 +41,9 @@ pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
     .default(12)
     .schema();
 
+pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
+    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
+    .schema();
+
 #[api(
     default: "On",
@@ -157,7 +160,7 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
                 schema: NODE_SCHEMA,
             },
             name: {
-                schema: DATASTORE_SCHEMA,
+                schema: ZPOOL_NAME_SCHEMA,
             },
         },
     },
@@ -251,7 +254,7 @@ pub fn create_zpool(
 
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
 
-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
 
     let add_datastore = add_datastore.unwrap_or(false);
 
@@ -311,7 +314,7 @@ pub fn create_zpool(
     }
 
     let upid_str = WorkerTask::new_thread(
-        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
+        "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
     {
         worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
 
@@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::api::schema::parse_property_string;
+use proxmox::tools::fs::open_file_locked;
 
 use crate::config::network::{self, NetworkConfig};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
@@ -230,7 +231,7 @@ pub fn create_interface(
     let interface_type = crate::tools::required_string_param(&param, "type")?;
     let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
 
-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, _digest) = network::config()?;
 
@@ -463,7 +464,7 @@ pub fn update_interface(
     param: Value,
 ) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = network::config()?;
 
@@ -586,7 +587,7 @@ pub fn update_interface(
 /// Remove network interface configuration.
 pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {
 
-    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
 
     let (mut config, expected_digest) = network::config()?;
 
@@ -624,9 +625,9 @@ pub async fn reload_network_config(
 
     network::assert_ifupdown2_installed()?;
 
-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
 
-    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {
+    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
 
     let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
 
@@ -4,12 +4,13 @@ use anyhow::{bail, Error};
 use serde_json::{json, Value};
 
 use proxmox::{sortable, identity, list_subdirs_api_method};
-use proxmox::api::{api, Router, Permission};
+use proxmox::api::{api, Router, Permission, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 
 use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use crate::server::WorkerTask;
 
 static SERVICE_NAME_LIST: [&str; 7] = [
     "proxmox-backup",
@@ -181,30 +182,43 @@ fn get_service_state(
     Ok(json_service_state(&service, status))
 }
 
-fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
+fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
 
-    // fixme: run background worker (fork_worker) ???
+    let workerid = format!("srv{}", &cmd);
 
-    match cmd {
-        "start"|"stop"|"restart"|"reload" => {},
+    let cmd = match cmd {
+        "start"|"stop"|"restart" => cmd.to_string(),
+        "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
         _ => bail!("unknown service command '{}'", cmd),
-    }
+    };
+    let service = service.to_string();
 
-    if service == "proxmox-backup" && cmd != "restart" {
-        bail!("invalid service cmd '{} {}'", service, cmd);
-    }
+    let upid = WorkerTask::new_thread(
+        &workerid,
+        Some(service.clone()),
+        userid,
+        false,
+        move |_worker| {
 
-    let real_service_name = real_service_name(service);
+            if service == "proxmox-backup" && cmd == "stop" {
+                bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
+            }
 
-    let status = Command::new("systemctl")
-        .args(&[cmd, real_service_name])
-        .status()?;
+            let real_service_name = real_service_name(&service);
 
-    if !status.success() {
-        bail!("systemctl {} failed with {}", cmd, status);
-    }
+            let status = Command::new("systemctl")
+                .args(&[&cmd, real_service_name])
+                .status()?;
 
-    Ok(Value::Null)
+            if !status.success() {
+                bail!("systemctl {} failed with {}", cmd, status);
+            }
+
+            Ok(())
+        }
+    )?;
+
+    Ok(upid.into())
 }
 
 #[api(
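
With the change above, service commands no longer block the API request;
run_service_command() spawns a WorkerTask and returns its UPID, which clients
poll via the tasks API. A hedged sketch of the call site contract (the UPID
string layout is assumed from this codebase's conventions, not confirmed by
this diff):

    // let upid: Value = run_service_command("proxmox-backup-proxy", "reload", userid)?;
    // upid serializes to a task identifier roughly of the form
    // "UPID:<node>:<pid>:<pstart>:<task-id>:<starttime>:srvreload:proxmox-backup-proxy:<userid>:"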
@@ -227,11 +241,14 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
 fn start_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("starting service {}", service);
 
-    run_service_command(&service, "start")
+    run_service_command(&service, "start", userid)
 }
 
 #[api(
@@ -254,11 +271,14 @@ fn start_service(
 fn stop_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("stopping service {}", service);
 
-    run_service_command(&service, "stop")
+    run_service_command(&service, "stop", userid)
 }
 
 #[api(
@@ -281,15 +301,18 @@ fn stop_service(
 fn restart_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("re-starting service {}", service);
 
     if &service == "proxmox-backup-proxy" {
         // special case, avoid aborting running tasks
-        run_service_command(&service, "reload")
+        run_service_command(&service, "reload", userid)
     } else {
-        run_service_command(&service, "restart")
+        run_service_command(&service, "restart", userid)
     }
 }
 
@@ -313,11 +336,14 @@ fn restart_service(
 fn reload_service(
     service: String,
     _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
 
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+
     log::info!("reloading service {}", service);
 
-    run_service_command(&service, "reload")
+    run_service_command(&service, "reload", userid)
 }
 
 
@@ -10,6 +10,7 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 
 use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
+use crate::tools::cert::CertInfo;
 
 #[api(
     input: {
@@ -46,14 +47,24 @@ use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
                 description: "Total CPU usage since last query.",
                 optional: true,
             },
-        }
+            info: {
+                type: Object,
+                description: "contains node information",
+                properties: {
+                    fingerprint: {
+                        description: "The SSL Fingerprint",
+                        type: String,
+                    },
+                },
+            },
+        },
     },
     access: {
         permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
     },
 )]
 /// Read node memory, CPU and (root) disk usage
-fn get_usage(
+fn get_status(
     _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
@@ -63,6 +74,10 @@ fn get_usage(
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
     let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;
 
+    // get fingerprint
+    let cert = CertInfo::new()?;
+    let fp = cert.fingerprint()?;
+
     Ok(json!({
         "memory": {
             "total": meminfo.memtotal,
@@ -74,7 +89,10 @@ fn get_usage(
             "total": disk_usage.total,
             "used": disk_usage.used,
             "free": disk_usage.avail,
-        }
+        },
+        "info": {
+            "fingerprint": fp,
+        },
     }))
 }
 
@@ -122,5 +140,5 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
 }
 
 pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_USAGE)
+    .get(&API_METHOD_GET_STATUS)
     .post(&API_METHOD_REBOOT_OR_SHUTDOWN);
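
A small consumer sketch for the extended status response above (field access
only, nothing assumed beyond the JSON shape shown; function name hypothetical):

    use serde_json::Value;

    fn node_fingerprint(status: &Value) -> Option<&str> {
        status.get("info")?.get("fingerprint")?.as_str()
    }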
@@ -5,8 +5,16 @@ use proxmox::api::{api, Router, Permission};
 
 use crate::tools;
 use crate::config::acl::PRIV_SYS_AUDIT;
+use crate::api2::types::NODE_SCHEMA;
 
 #[api(
+    input: {
+        properties: {
+            node: {
+                schema: NODE_SCHEMA,
+            },
+        },
+    },
     returns: {
         description: "Subscription status.",
         properties: {
@@ -4,13 +4,13 @@ use std::io::{BufRead, BufReader};

 use anyhow::{Error};
 use serde_json::{json, Value};

-use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
+use proxmox::api::{api, Router, RpcEnvironment, Permission};
 use proxmox::api::router::SubdirMap;
 use proxmox::{identity, list_subdirs_api_method, sortable};

 use crate::tools;
 use crate::api2::types::*;
-use crate::server::{self, UPID};
+use crate::server::{self, UPID, TaskState};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;

@@ -84,11 +84,11 @@ async fn get_task_status(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
     }

     let mut result = json!({
@@ -99,15 +99,15 @@ async fn get_task_status(
         "starttime": upid.starttime,
         "type": upid.worker_type,
         "id": upid.worker_id,
-        "user": upid.username,
+        "user": upid.userid,
     });

     if crate::server::worker_is_active(&upid).await? {
         result["status"] = Value::from("running");
     } else {
-        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
+        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(TaskState::Unknown { endtime: 0 });
         result["status"] = Value::from("stopped");
-        result["exitstatus"] = Value::from(exitstatus);
+        result["exitstatus"] = Value::from(exitstatus.to_string());
     };

     Ok(result)
@@ -161,11 +161,11 @@ async fn read_task_log(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
     }

     let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(

     let upid = extract_upid(&param)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    if username != upid.username {
+    if userid != upid.userid {
         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
+        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
     }

     server::abort_worker_async(upid);
@@ -281,7 +281,7 @@ fn stop_task(
             default: false,
         },
         userfilter: {
-            optional:true,
+            optional: true,
             type: String,
             description: "Only list tasks from this user.",
         },
@@ -307,9 +307,9 @@ pub fn list_tasks(
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

     let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -324,11 +324,11 @@ pub fn list_tasks(
     let mut count = 0;

     for info in list {
-        if !list_all && info.upid.username != username { continue; }
+        if !list_all && info.upid.userid != userid { continue; }


-        if let Some(username) = userfilter {
-            if !info.upid.username.contains(username) { continue; }
+        if let Some(userid) = userfilter {
+            if !info.upid.userid.as_str().contains(userid) { continue; }
         }

         if let Some(store) = store {
@@ -352,8 +352,9 @@ pub fn list_tasks(

         if let Some(ref state) = info.state {
             if running { continue; }
-            if errors && state.1 == "OK" {
-                continue;
+            match state {
+                crate::server::TaskState::OK { .. } if errors => continue,
+                _ => {},
             }
         }
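The key change in this file is the last hunk: the task state stops being a bare string and becomes the structured `TaskState` imported from `crate::server`, so the "errors only" filter turns into a pattern match instead of a string comparison. A rough, self-contained sketch of that shape (simplified stand-in types, not the actual proxmox-backup definitions):

use std::fmt;

// Simplified stand-in for the structured TaskState used in this series.
enum TaskState {
    OK { endtime: i64 },
    Error { message: String, endtime: i64 },
    Unknown { endtime: i64 },
}

impl TaskState {
    // Every variant carries its end time, so endtime() no longer needs a tuple.
    fn endtime(&self) -> i64 {
        match self {
            TaskState::OK { endtime }
            | TaskState::Error { endtime, .. }
            | TaskState::Unknown { endtime } => *endtime,
        }
    }
}

impl fmt::Display for TaskState {
    // Display preserves the old string representation for API output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            TaskState::OK { .. } => write!(f, "OK"),
            TaskState::Error { message, .. } => write!(f, "ERROR: {}", message),
            TaskState::Unknown { .. } => write!(f, "unknown"),
        }
    }
}

// Filtering "errors only" becomes a structural match instead of a string test.
fn keep_for_error_filter(state: &TaskState) -> bool {
    !matches!(state, TaskState::OK { .. })
}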
@@ -2,6 +2,7 @@
 use std::sync::{Arc};

 use anyhow::{format_err, Error};
+use futures::{select, future::FutureExt};

 use proxmox::api::api;
 use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
@@ -12,13 +13,15 @@ use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_
 use crate::api2::types::*;
 use crate::config::{
     remote,
+    sync::SyncJobConfig,
+    jobstate::Job,
     acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
     cached_user_info::CachedUserInfo,
 };


 pub fn check_pull_privs(
-    username: &str,
+    userid: &Userid,
     store: &str,
     remote: &str,
     remote_store: &str,
@@ -27,11 +30,11 @@ pub fn check_pull_privs(

     let user_info = CachedUserInfo::new()?;

-    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
-    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+    user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

     if delete {
-        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+        user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
     }

     Ok(())
@@ -62,6 +65,68 @@ pub async fn get_pull_parameters(
     Ok((client, src_repo, tgt_store))
 }

+pub fn do_sync_job(
+    mut job: Job,
+    sync_job: SyncJobConfig,
+    userid: &Userid,
+    schedule: Option<String>,
+) -> Result<String, Error> {
+
+    let job_id = job.jobname().to_string();
+    let worker_type = job.jobtype().to_string();
+
+    let upid_str = WorkerTask::spawn(
+        &worker_type,
+        Some(job.jobname().to_string()),
+        userid.clone(),
+        false,
+        move |worker| async move {
+
+            job.start(&worker.upid().to_string())?;
+
+            let worker2 = worker.clone();
+
+            let worker_future = async move {
+
+                let delete = sync_job.remove_vanished.unwrap_or(true);
+                let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+
+                worker.log(format!("Starting datastore sync job '{}'", job_id));
+                if let Some(event_str) = schedule {
+                    worker.log(format!("task triggered by schedule '{}'", event_str));
+                }
+                worker.log(format!("Sync datastore '{}' from '{}/{}'",
+                    sync_job.store, sync_job.remote, sync_job.remote_store));
+
+                crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
+
+                worker.log(format!("sync job '{}' end", &job_id));
+
+                Ok(())
+            };
+
+            let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
+
+            let res = select!{
+                worker = worker_future.fuse() => worker,
+                abort = abort_future => abort,
+            };
+
+            let status = worker2.create_state(&res);
+
+            match job.finish(status) {
+                Ok(_) => {},
+                Err(err) => {
+                    eprintln!("could not finish job state: {}", err);
+                }
+            }
+
+            res
+        })?;
+
+    Ok(upid_str)
+}
+
 #[api(
     input: {
         properties: {
@@ -99,19 +164,19 @@ async fn pull (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let delete = remove_vanished.unwrap_or(true);

-    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;
+    check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;

     let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

     // fixme: set to_stdout to false?
-    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
+    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {

         worker.log(format!("sync datastore '{}' start", store));

-        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;
+        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid).await?;

         worker.log(format!("sync datastore '{}' end", store));
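The new `do_sync_job` races the sync work against the worker's abort future with `futures::select!`. A rough, self-contained sketch of that select pattern under stated assumptions: `worker.abort_future()` comes from the diff above, while the `tokio` oneshot channel and the function shape here are stand-ins for the example, not the real WorkerTask API:

use anyhow::{format_err, Error};
use futures::{pin_mut, select, FutureExt};
use tokio::sync::oneshot;

async fn run_with_abort(abort_rx: oneshot::Receiver<()>) -> Result<(), Error> {
    // The actual job body would run here.
    let work = async { Ok::<(), Error>(()) }.fuse();

    // Firing the oneshot (or dropping the sender) resolves this branch
    // into an error, mirroring worker2.abort_future().map(...) above.
    let abort = abort_rx
        .map(|_| Err::<(), Error>(format_err!("sync aborted")))
        .fuse();

    pin_mut!(work, abort);

    // Whichever future completes first decides the job result; the result
    // is then recorded (job.finish(status) in the real code).
    select! {
        res = work => res,
        res = abort => res,
    }
}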
@@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);

-        let username = rpcenv.get_user().unwrap();
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
         let store = tools::required_string_param(&param, "store")?.to_owned();

         let user_info = CachedUserInfo::new()?;
-        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
+        user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;

         let datastore = DataStore::lookup_datastore(&store)?;

@@ -90,9 +90,14 @@ fn upgrade_to_backup_reader_protocol(

         let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());

-        WorkerTask::spawn("reader", Some(worker_id), &username.clone(), true, move |worker| {
+        WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
             let mut env = ReaderEnvironment::new(
-                env_type, username.clone(), worker.clone(), datastore, backup_dir);
+                env_type,
+                userid,
+                worker.clone(),
+                datastore,
+                backup_dir,
+            );

             env.debug = debug;

@@ -225,8 +230,8 @@ fn download_chunk(
     env.debug(format!("download chunk {:?}", path));

     let data = tokio::fs::read(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("reading file {:?} failed: {}", path2, err)))
-        .await?;
+        .await
+        .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;

     let body = Body::from(data);

@@ -260,7 +265,7 @@ fn download_chunk_old(
     let path3 = path.clone();

     let response_future = tokio::fs::File::open(path)
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
+        .map_err(move |err| http_err!(BAD_REQUEST, "open file {:?} failed: {}", path2, err))
         .and_then(move |file| {
             env2.debug(format!("download chunk {:?}", path3));
             let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
@@ -5,9 +5,10 @@ use serde_json::{json, Value};

 use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::server::WorkerTask;
+use crate::api2::types::Userid;
 use crate::backup::*;
 use crate::server::formatter::*;
+use crate::server::WorkerTask;

 //use proxmox::tools;

@@ -16,7 +17,7 @@ use crate::server::formatter::*;
 pub struct ReaderEnvironment {
     env_type: RpcEnvironmentType,
     result_attributes: Value,
-    user: String,
+    user: Userid,
     pub debug: bool,
     pub formatter: &'static OutputFormatter,
     pub worker: Arc<WorkerTask>,
@@ -28,7 +29,7 @@ pub struct ReaderEnvironment {
 impl ReaderEnvironment {
     pub fn new(
         env_type: RpcEnvironmentType,
-        user: String,
+        user: Userid,
         worker: Arc<WorkerTask>,
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
@@ -77,7 +78,7 @@ impl RpcEnvironment for ReaderEnvironment {
     }

     fn get_user(&self) -> Option<String> {
-        Some(self.user.clone())
+        Some(self.user.to_string())
     }
 }
@@ -10,14 +10,14 @@ use proxmox::api::{
     Router,
     RpcEnvironment,
     SubdirMap,
-    UserInformation,
 };

 use crate::api2::types::{
     DATASTORE_SCHEMA,
     RRDMode,
     RRDTimeFrameResolution,
-    TaskListItem
+    TaskListItem,
+    Userid,
 };

 use crate::server;
@@ -84,13 +84,13 @@ fn datastore_status(

     let (config, _digest) = datastore::config()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let mut list = Vec::new();

     for (store, (_, _)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
@@ -161,6 +161,8 @@ fn datastore_status(
             if b != 0.0 {
                 let estimate = (1.0 - a) / b;
                 entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
+            } else {
+                entry["estimated-full-date"] = Value::from(0);
             }
         }
     }
@@ -200,9 +202,9 @@ pub fn list_tasks(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
+    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

     let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -210,7 +212,7 @@ pub fn list_tasks(
     let list: Vec<TaskListItem> = server::read_task_list()?
         .into_iter()
         .map(TaskListItem::from)
-        .filter(|entry| list_all || entry.user == username)
+        .filter(|entry| list_all || entry.user == userid)
         .collect();

     Ok(list.into())
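The new else-branch above always populates `estimated-full-date`. Reading the surrounding code, `a` and `b` appear to be the intercept and slope of a linear fit of relative datastore usage over epoch time, so the store is extrapolated to be full when usage reaches 1.0. A standalone sketch of that arithmetic (a hypothetical helper, not the actual implementation):

// With usage(t) ~ a + b*t (t in epoch seconds, usage as a fraction of
// capacity), the store hits 100% when a + b*t == 1.0, i.e. t = (1.0 - a) / b.
fn estimated_full_epoch(a: f64, b: f64) -> u64 {
    if b != 0.0 {
        ((1.0 - a) / b).floor() as u64
    } else {
        0 // flat trend: report 0 instead of leaving the field unset
    }
}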
src/api2/types/macros.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
+//! Macros exported from api2::types.
+
+#[macro_export]
+macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
@@ -1,10 +1,23 @@
-use anyhow::{bail};
+use anyhow::bail;
-use ::serde::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};

 use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
 use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

+use crate::backup::CryptMode;
+use crate::server::UPID;
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+mod userid;
+pub use userid::{Realm, RealmRef};
+pub use userid::{Username, UsernameRef};
+pub use userid::Userid;
+pub use userid::PROXMOX_GROUP_ID_SCHEMA;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -19,19 +32,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }

-// we only allow a limited set of characters
-// colon is not allowed, because we store usernames in
-// colon separated lists)!
-// slash is not allowed because it is used as pve API delimiter
-// also see "man useradd"
-macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
-macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
-
-macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
-
-#[macro_export]
-macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
-
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

@@ -65,17 +65,15 @@ const_regex!{

     pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

-    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
-
     pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

-    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
-
     pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

     pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
+
+    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -111,12 +109,6 @@ pub const DNS_NAME_FORMAT: ApiStringFormat =
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

-pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
-
-pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
-
 pub const PASSWORD_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PASSWORD_REGEX);

@@ -339,24 +331,6 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
     .format(&DNS_NAME_OR_IP_FORMAT)
     .schema();

-pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = StringSchema::new("Authentication domain ID")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
-    .schema();
-
-pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
-    .format(&PROXMOX_USER_ID_FORMAT)
-    .min_length(3)
-    .max_length(64)
-    .schema();
-
-pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
-    .format(&PROXMOX_GROUP_ID_FORMAT)
-    .min_length(3)
-    .max_length(64)
-    .schema();
-
 pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
     .format(&BLOCKDEVICE_NAME_FORMAT)
     .min_length(3)
@@ -384,6 +358,10 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
             },
         },
+        owner: {
+            type: Userid,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -399,7 +377,26 @@ pub struct GroupListItem {
     pub files: Vec<String>,
     /// The owner of group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<String>,
+    pub owner: Option<Userid>,
+}
+
+#[api(
+    properties: {
+        upid: {
+            schema: UPID_SCHEMA
+        },
+        state: {
+            type: String
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+/// Task properties.
+pub struct SnapshotVerifyState {
+    /// UPID of the verify task
+    pub upid: UPID,
+    /// State of the verification. "failed" or "ok"
+    pub state: String,
 }

 #[api(
@@ -413,11 +410,23 @@ pub struct GroupListItem {
         "backup-time": {
             schema: BACKUP_TIME_SCHEMA,
         },
+        comment: {
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+            optional: true,
+        },
+        verification: {
+            type: SnapshotVerifyState,
+            optional: true,
+        },
         files: {
             items: {
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
             },
         },
+        owner: {
+            type: Userid,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -427,6 +436,12 @@ pub struct SnapshotListItem {
     pub backup_type: String, // enum
     pub backup_id: String,
     pub backup_time: i64,
+    /// The first line from manifest "notes"
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub comment: Option<String>,
+    /// The result of the last run verify task
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verification: Option<SnapshotVerifyState>,
     /// List of contained archive files.
     pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
@@ -434,7 +449,7 @@ pub struct SnapshotListItem {
     pub size: Option<u64>,
     /// The owner of the snapshots group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<String>,
+    pub owner: Option<Userid>,
 }

 #[api(
@@ -496,6 +511,10 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
         "filename": {
             schema: BACKUP_ARCHIVE_NAME_SCHEMA,
         },
+        "crypt-mode": {
+            type: CryptMode,
+            optional: true,
+        },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -503,9 +522,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
 /// Basic information about archive files inside a backup snapshot.
 pub struct BackupContent {
     pub filename: String,
-    /// Info if file is encrypted (or empty if we do not have that info)
+    /// Info if file is encrypted, signed, or neither.
     #[serde(skip_serializing_if="Option::is_none")]
-    pub encrypted: Option<bool>,
+    pub crypt_mode: Option<CryptMode>,
     /// Archive size (from backup manifest).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,
@@ -573,7 +592,8 @@ pub struct StorageStatus {

 #[api(
     properties: {
-        "upid": { schema: UPID_SCHEMA },
+        upid: { schema: UPID_SCHEMA },
+        user: { type: Userid },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -593,7 +613,7 @@ pub struct TaskListItem {
     /// Worker ID (arbitrary ASCII string)
     pub worker_id: Option<String>,
     /// The user who started the task
-    pub user: String,
+    pub user: Userid,
     /// The task end time (Epoch)
     #[serde(skip_serializing_if="Option::is_none")]
     pub endtime: Option<i64>,
@@ -606,7 +626,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
     fn from(info: crate::server::TaskListInfo) -> Self {
         let (endtime, status) = info
             .state
-            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));

         TaskListItem {
             upid: info.upid_str,
@@ -616,7 +636,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
             starttime: info.upid.starttime,
             worker_type: info.upid.worker_type,
             worker_id: info.upid.worker_id,
-            user: info.upid.username,
+            user: info.upid.userid,
             endtime,
             status,
         }
@@ -882,9 +902,6 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

 #[test]
 fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
-
-    let schema = PROXMOX_USER_ID_SCHEMA;

     let invalid_user_ids = [
         "x", // too short
         "xx", // too short
@@ -898,7 +915,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &schema) {
+        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
             bail!("test userid '{}' failed - got Ok() while exception an error.", name);
         }
     }
@@ -912,7 +929,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in valid_user_ids.iter() {
-        let v = match parse_simple_value(name, &schema) {
+        let v = match parse_simple_value(name, &Userid::API_SCHEMA) {
             Ok(v) => v,
             Err(err) => {
                 bail!("unable to parse userid '{}' - {}", name, err);
@@ -954,3 +971,30 @@ pub enum RRDTimeFrameResolution {
     /// 1 week => last 490 days
     Year = 60*10080,
 }
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+/// Describes a package for which an update is available.
+pub struct APTUpdateInfo {
+    /// Package name
+    pub package: String,
+    /// Package title
+    pub title: String,
+    /// Package architecture
+    pub arch: String,
+    /// Human readable package description
+    pub description: String,
+    /// New version to be updated to
+    pub version: String,
+    /// Old version currently installed
+    pub old_version: String,
+    /// Package origin
+    pub origin: String,
+    /// Package priority in human-readable form
+    pub priority: String,
+    /// Package section
+    pub section: String,
+    /// URL under which the package's changelog can be retrieved
+    pub change_log_url: String,
+}
src/api2/types/userid.rs (new file, 420 lines)
@@ -0,0 +1,420 @@
+//! Types for user handling.
+//!
+//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
+//!
+//! Since they're all string types, they're organized as follows:
+//!
+//! * [`Username`]: an owned user name. Internally a `String`.
+//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs
+//!   with `String`, meaning you can only make references to it.
+//! * [`Realm`]: an owned realm (`String` equivalent).
+//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
+//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
+//!   borrowed type.
+//!
+//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
+//! compared directly. If a direct comparison is really required, they can be compared as strings
+//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
+//! each other, as in those two cases the comparison has meaning.
+
+use std::borrow::Borrow;
+use std::convert::TryFrom;
+use std::fmt;
+
+use anyhow::{bail, format_err, Error};
+use lazy_static::lazy_static;
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
+use proxmox::const_regex;
+
+// we only allow a limited set of characters
+// colon is not allowed, because we store usernames in
+// colon separated lists)!
+// slash is not allowed because it is used as pve API delimiter
+// also see "man useradd"
+macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
+macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
+macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
+
+const_regex! {
+    pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
+    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
+    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
+}
+
+pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
+
+pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
+
+pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
+
+pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
+    .format(&PROXMOX_GROUP_ID_FORMAT)
+    .min_length(3)
+    .max_length(64)
+    .schema();
+
+pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
+    StringSchema::new("Authentication domain ID")
+        .format(&super::PROXMOX_SAFE_ID_FORMAT)
+        .min_length(3)
+        .max_length(32);
+pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
+
+
+#[api(
+    type: String,
+    format: &PROXMOX_USER_NAME_FORMAT,
+)]
+/// The user name part of a user id.
+///
+/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order
+/// to compare user names directly, they need to be explicitly compared as strings by calling
+/// `.as_str()`.
+///
+/// ```compile_fail
+/// fn test(a: Username, b: Username) -> bool {
+///     a == b // illegal and does not compile
+/// }
+/// ```
+#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
+pub struct Username(String);
+
+/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user.
+///
+/// This is like a `str` to the `String` of a [`Username`].
+#[derive(Debug, Hash)]
+pub struct UsernameRef(str);
+
+#[doc(hidden)]
+/// ```compile_fail
+/// let a: Username = unsafe { std::mem::zeroed() };
+/// let b: Username = unsafe { std::mem::zeroed() };
+/// let _ = <Username as PartialEq>::eq(&a, &b);
+/// ```
+///
+/// ```compile_fail
+/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
+/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
+/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
+/// ```
+///
+/// ```compile_fail
+/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
+/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
+/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
+/// ```
+struct _AssertNoEqImpl;
+
+impl UsernameRef {
+    fn new(s: &str) -> &Self {
+        unsafe { &*(s as *const str as *const UsernameRef) }
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+impl std::ops::Deref for Username {
+    type Target = UsernameRef;
+
+    fn deref(&self) -> &UsernameRef {
+        self.borrow()
+    }
+}
+
+impl Borrow<UsernameRef> for Username {
+    fn borrow(&self) -> &UsernameRef {
+        UsernameRef::new(self.as_str())
+    }
+}
+
+impl AsRef<UsernameRef> for Username {
+    fn as_ref(&self) -> &UsernameRef {
+        UsernameRef::new(self.as_str())
+    }
+}
+
+impl ToOwned for UsernameRef {
+    type Owned = Username;
+
+    fn to_owned(&self) -> Self::Owned {
+        Username(self.0.to_owned())
+    }
+}
+
+impl TryFrom<String> for Username {
+    type Error = Error;
+
+    fn try_from(s: String) -> Result<Self, Error> {
+        if !PROXMOX_USER_NAME_REGEX.is_match(&s) {
+            bail!("invalid user name");
+        }
+
+        Ok(Self(s))
+    }
+}
+
+impl<'a> TryFrom<&'a str> for &'a UsernameRef {
+    type Error = Error;
+
+    fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> {
+        if !PROXMOX_USER_NAME_REGEX.is_match(s) {
+            bail!("invalid name in user id");
+        }
+
+        Ok(UsernameRef::new(s))
+    }
+}
+
+#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)]
+/// An authentication realm.
+#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
+pub struct Realm(String);
+
+/// A reference to an authentication realm.
+///
+/// This is like a `str` to the `String` of a `Realm`.
+#[derive(Debug, Hash, Eq, PartialEq)]
+pub struct RealmRef(str);
+
+impl RealmRef {
+    fn new(s: &str) -> &Self {
+        unsafe { &*(s as *const str as *const RealmRef) }
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+impl std::ops::Deref for Realm {
+    type Target = RealmRef;
+
+    fn deref(&self) -> &RealmRef {
+        self.borrow()
+    }
+}
+
+impl Borrow<RealmRef> for Realm {
+    fn borrow(&self) -> &RealmRef {
+        RealmRef::new(self.as_str())
+    }
+}
+
+impl AsRef<RealmRef> for Realm {
+    fn as_ref(&self) -> &RealmRef {
+        RealmRef::new(self.as_str())
+    }
+}
+
+impl ToOwned for RealmRef {
+    type Owned = Realm;
+
+    fn to_owned(&self) -> Self::Owned {
+        Realm(self.0.to_owned())
+    }
+}
+
+impl TryFrom<String> for Realm {
+    type Error = Error;
+
+    fn try_from(s: String) -> Result<Self, Error> {
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s)
+            .map_err(|_| format_err!("invalid realm"))?;
+
+        Ok(Self(s))
+    }
+}
+
+impl<'a> TryFrom<&'a str> for &'a RealmRef {
+    type Error = Error;
+
+    fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s)
+            .map_err(|_| format_err!("invalid realm"))?;
+
+        Ok(RealmRef::new(s))
+    }
+}
+
+impl PartialEq<str> for Realm {
+    fn eq(&self, rhs: &str) -> bool {
+        self.0 == rhs
+    }
+}
+
+impl PartialEq<&str> for Realm {
+    fn eq(&self, rhs: &&str) -> bool {
+        self.0 == *rhs
+    }
+}
+
+impl PartialEq<str> for RealmRef {
+    fn eq(&self, rhs: &str) -> bool {
+        self.0 == *rhs
+    }
+}
+
+impl PartialEq<&str> for RealmRef {
+    fn eq(&self, rhs: &&str) -> bool {
+        self.0 == **rhs
+    }
+}
+
+impl PartialEq<RealmRef> for Realm {
+    fn eq(&self, rhs: &RealmRef) -> bool {
+        self.0 == &rhs.0
+    }
+}
+
+impl PartialEq<Realm> for RealmRef {
+    fn eq(&self, rhs: &Realm) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl PartialEq<Realm> for &RealmRef {
+    fn eq(&self, rhs: &Realm) -> bool {
+        (*self).0 == rhs.0
+    }
+}
+
+/// A complete user id consting of a user name and a realm.
+#[derive(Clone, Debug, Hash)]
+pub struct Userid {
+    data: String,
+    name_len: usize,
+    //name: Username,
+    //realm: Realm,
+}
+
+impl Userid {
+    pub const API_SCHEMA: Schema = StringSchema::new("User ID")
+        .format(&PROXMOX_USER_ID_FORMAT)
+        .min_length(3)
+        .max_length(64)
+        .schema();
+
+    const fn new(data: String, name_len: usize) -> Self {
+        Self { data, name_len }
+    }
+
+    pub fn name(&self) -> &UsernameRef {
+        UsernameRef::new(&self.data[..self.name_len])
+    }
+
+    pub fn realm(&self) -> &RealmRef {
+        RealmRef::new(&self.data[(self.name_len + 1)..])
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.data
+    }
+
+    /// Get the "backup@pam" user id.
+    pub fn backup_userid() -> &'static Self {
+        &*BACKUP_USERID
+    }
+
+    /// Get the "root@pam" user id.
+    pub fn root_userid() -> &'static Self {
+        &*ROOT_USERID
+    }
+}
+
+lazy_static! {
+    pub static ref BACKUP_USERID: Userid = Userid::new("backup@pam".to_string(), 6);
+    pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
+}
+
+impl Eq for Userid {}
+
+impl PartialEq for Userid {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.data == rhs.data && self.name_len == rhs.name_len
+    }
+}
+
+impl From<(Username, Realm)> for Userid {
+    fn from(parts: (Username, Realm)) -> Self {
+        Self::from((parts.0.as_ref(), parts.1.as_ref()))
+    }
+}
+
+impl From<(&UsernameRef, &RealmRef)> for Userid {
+    fn from(parts: (&UsernameRef, &RealmRef)) -> Self {
+        let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str());
+        let name_len = parts.0.as_str().len();
+        Self { data, name_len }
+    }
+}
+
+impl fmt::Display for Userid {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.data.fmt(f)
+    }
+}
+
+impl std::str::FromStr for Userid {
+    type Err = Error;
+
+    fn from_str(id: &str) -> Result<Self, Error> {
+        let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
+            Some(pos) => (&id[..pos], &id[(pos + 1)..]),
+            None => bail!("not a valid user id"),
+        };
+
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
+            .map_err(|_| format_err!("invalid realm in user id"))?;
+
+        Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
+    }
+}
+
+impl TryFrom<String> for Userid {
+    type Error = Error;
+
+    fn try_from(data: String) -> Result<Self, Error> {
+        let name_len = data
+            .as_bytes()
+            .iter()
+            .rposition(|&b| b == b'@')
+            .ok_or_else(|| format_err!("not a valid user id"))?;
+
+        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
+            .map_err(|_| format_err!("invalid realm in user id"))?;
+
+        Ok(Self { data, name_len })
+    }
+}
+
+impl PartialEq<str> for Userid {
+    fn eq(&self, rhs: &str) -> bool {
+        rhs.len() > self.name_len + 2 // make sure range access below is allowed
+            && rhs.starts_with(self.name().as_str())
+            && rhs.as_bytes()[self.name_len] == b'@'
+            && &rhs[(self.name_len + 1)..] == self.realm().as_str()
+    }
+}
+
+impl PartialEq<&str> for Userid {
+    fn eq(&self, rhs: &&str) -> bool {
+        *self == **rhs
+    }
+}
+
+impl PartialEq<String> for Userid {
+    fn eq(&self, rhs: &String) -> bool {
+        self == rhs.as_str()
+    }
+}
+
+proxmox::forward_deserialize_to_from_str!(Userid);
+proxmox::forward_serialize_to_display!(Userid);
src/auth.rs (67 lines changed)
@@ -10,39 +10,54 @@ use base64;
 use anyhow::{bail, format_err, Error};
 use serde_json::json;

+use crate::api2::types::{Userid, UsernameRef, RealmRef};
+
 pub trait ProxmoxAuthenticator {
-    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
+    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
-    fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
+    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
 }

 pub struct PAM();

 impl ProxmoxAuthenticator for PAM {

-    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
+    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
-        auth.get_handler().set_credentials(username, password);
+        auth.get_handler().set_credentials(username.as_str(), password);
         auth.authenticate()?;
         return Ok(());

     }

-    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
+    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let mut child = Command::new("passwd")
-            .arg(username)
+            .arg(username.as_str())
             .stdin(Stdio::piped())
             .stderr(Stdio::piped())
             .spawn()
-            .or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;
+            .map_err(|err| format_err!(
+                "unable to set password for '{}' - execute passwd failed: {}",
+                username.as_str(),
+                err,
+            ))?;

         // Note: passwd reads password twice from stdin (for verify)
         writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;

-        let output = child.wait_with_output()
-            .or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;
+        let output = child
+            .wait_with_output()
+            .map_err(|err| format_err!(
+                "unable to set password for '{}' - wait failed: {}",
+                username.as_str(),
+                err,
+            ))?;

         if !output.status.success() {
-            bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
+            bail!(
+                "unable to set password for '{}' - {}",
+                username.as_str(),
+                String::from_utf8_lossy(&output.stderr),
+            );
         }

         Ok(())
@@ -90,23 +105,23 @@ pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error>
     Ok(())
 }

-const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";
+const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");

 impl ProxmoxAuthenticator for PBS {

-    fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
+    fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
-        match data[username].as_str() {
+        match data[username.as_str()].as_str() {
             None => bail!("no password set"),
             Some(enc_password) => verify_crypt_pw(password, enc_password)?,
         }
         Ok(())
     }

-    fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
+    fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let enc_password = encrypt_pw(password)?;
         let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
-        data[username] = enc_password.into();
+        data[username.as_str()] = enc_password.into();

         let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
         let options = proxmox::tools::fs::CreateOptions::new()
@@ -121,28 +136,18 @@ impl ProxmoxAuthenticator for PBS {
     }
 }

-pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
-    let data: Vec<&str> = userid.rsplitn(2, '@').collect();
-
-    if data.len() != 2 {
-        bail!("userid '{}' has no realm", userid);
-    }
-    Ok((data[1].to_owned(), data[0].to_owned()))
-}
-
 /// Lookup the autenticator for the specified realm
-pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
+pub fn lookup_authenticator(realm: &RealmRef) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
-    match realm {
+    match realm.as_str() {
         "pam" => Ok(Box::new(PAM())),
         "pbs" => Ok(Box::new(PBS())),
-        _ => bail!("unknown realm '{}'", realm),
+        _ => bail!("unknown realm '{}'", realm.as_str()),
     }
 }

 /// Authenticate users
-pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
+pub fn authenticate_user(userid: &Userid, password: &str) -> Result<(), Error> {
-    let (username, realm) = parse_userid(userid)?;
-
-    lookup_authenticator(&realm)?
-        .authenticate_user(&username, password)
+    lookup_authenticator(userid.realm())?
+        .authenticate_user(userid.name(), password)
 }
@ -10,16 +10,17 @@ use std::path::PathBuf;
|
|||||||
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
|
||||||
use proxmox::try_block;
|
use proxmox::try_block;
|
||||||
|
|
||||||
|
use crate::api2::types::Userid;
|
||||||
use crate::tools::epoch_now_u64;
|
use crate::tools::epoch_now_u64;
|
||||||
|
|
||||||
fn compute_csrf_secret_digest(
|
fn compute_csrf_secret_digest(
|
||||||
timestamp: i64,
|
timestamp: i64,
|
||||||
secret: &[u8],
|
secret: &[u8],
|
||||||
username: &str,
|
userid: &Userid,
|
||||||
) -> String {
|
) -> String {
|
||||||
|
|
||||||
let mut hasher = sha::Sha256::new();
|
let mut hasher = sha::Sha256::new();
|
||||||
let data = format!("{:08X}:{}:", timestamp, username);
|
let data = format!("{:08X}:{}:", timestamp, userid);
|
||||||
hasher.update(data.as_bytes());
|
hasher.update(data.as_bytes());
|
||||||
hasher.update(secret);
|
hasher.update(secret);
|
||||||
|
|
||||||
@ -28,19 +29,19 @@ fn compute_csrf_secret_digest(
|
|||||||
|
|
||||||
pub fn assemble_csrf_prevention_token(
|
pub fn assemble_csrf_prevention_token(
|
||||||
secret: &[u8],
|
secret: &[u8],
|
||||||
username: &str,
|
userid: &Userid,
|
||||||
) -> String {
|
) -> String {
|
||||||
|
|
||||||
let epoch = epoch_now_u64().unwrap() as i64;
|
let epoch = epoch_now_u64().unwrap() as i64;
|
||||||
|
|
||||||
let digest = compute_csrf_secret_digest(epoch, secret, username);
|
let digest = compute_csrf_secret_digest(epoch, secret, userid);
|
||||||
|
|
||||||
format!("{:08X}:{}", epoch, digest)
|
format!("{:08X}:{}", epoch, digest)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn verify_csrf_prevention_token(
|
pub fn verify_csrf_prevention_token(
|
||||||
secret: &[u8],
|
secret: &[u8],
|
||||||
username: &str,
|
userid: &Userid,
|
||||||
token: &str,
|
token: &str,
|
||||||
min_age: i64,
|
min_age: i64,
|
||||||
max_age: i64,
|
max_age: i64,
|
||||||
@ -62,7 +63,7 @@ pub fn verify_csrf_prevention_token(
|
|||||||
let ttime = i64::from_str_radix(timestamp, 16).
|
let ttime = i64::from_str_radix(timestamp, 16).
|
||||||
map_err(|err| format_err!("timestamp format error - {}", err))?;
|
map_err(|err| format_err!("timestamp format error - {}", err))?;
|
||||||
|
|
||||||
let digest = compute_csrf_secret_digest(ttime, secret, username);
|
let digest = compute_csrf_secret_digest(ttime, secret, userid);
|
||||||
|
|
||||||
if digest != sig {
|
if digest != sig {
|
||||||
bail!("invalid signature.");
|
bail!("invalid signature.");
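Editorial note: the token format is `{epoch_hex}:{digest}`. A sketch of a round-trip, assuming the two functions above are in scope; the min/max age values here are illustrative, not taken from this diff:

fn csrf_roundtrip(secret: &[u8], userid: &Userid) -> Result<(), anyhow::Error> {
    let token = assemble_csrf_prevention_token(secret, userid);
    // token looks like "0000000065AB12CD:<sha256-hex>"; verify re-computes the
    // digest and checks the embedded timestamp against [min_age, max_age]
    verify_csrf_prevention_token(secret, userid, &token, -300, 3600 * 8)?;
    Ok(())
}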
@@ -40,21 +40,21 @@
 //!
 //!   Acquire shared lock for ChunkStore (process wide).
 //!
-//!   Note: When creating .idx files, we create temporary (.tmp) file,
+//!   Note: When creating .idx files, we create a temporary (.tmp) file,
 //!   then do an atomic rename ...
 //!
 //!
 //! * Garbage Collect:
 //!
 //!   Acquire exclusive lock for ChunkStore (process wide). If we have
-//!   already an shared lock for ChunkStore, try to updraged that
+//!   already a shared lock for the ChunkStore, try to upgrade that
 //!   lock.
 //!
 //!
 //! * Server Restart
 //!
-//!   Try to abort running garbage collection to release exclusive
-//!   ChunkStore lock asap. Start new service with existing listening
+//!   Try to abort the running garbage collection to release exclusive
+//!   ChunkStore locks ASAP. Start the new service with the existing listening
 //!   socket.
 //!
 //!
@@ -62,10 +62,10 @@
 //!
 //! Deleting backups is as easy as deleting the corresponding .idx
 //! files. Unfortunately, this does not free up any storage, because
-//! those files just contains references to chunks.
+//! those files just contain references to chunks.
 //!
 //! To free up some storage, we run a garbage collection process at
-//! regular intervals. The collector uses an mark and sweep
+//! regular intervals. The collector uses a mark and sweep
 //! approach. In the first phase, it scans all .idx files to mark used
 //! chunks. The second phase then removes all unmarked chunks from the
 //! store.
@@ -90,12 +90,12 @@
 //! amount of time ago (by default 24h). So we may only delete chunks
 //! with `atime` older than 24 hours.
 //!
-//! Another problem arise from running backups. The mark phase does
+//! Another problem arises from running backups. The mark phase does
 //! not find any chunks from those backups, because there is no .idx
 //! file for them (created after the backup). Chunks created or
 //! touched by those backups may have an `atime` as old as the start
-//! time of those backup. Please not that the backup start time may
-//! predate the GC start time. Se we may only delete chunk older than
+//! time of those backups. Please note that the backup start time may
+//! predate the GC start time. So we may only delete chunks older than
 //! the start time of those running backup jobs.
 //!
 //!
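Editorial note: the doc comment describes a two-phase collector. A self-contained sketch of that mark-and-sweep shape; the real implementation marks chunks by updating their atime and sweeps on an atime cutoff, while this simplification uses an in-memory set:

use std::collections::HashSet;

// Simplified mark and sweep over chunk digests, as described above.
fn garbage_collect(
    index_files: &[Vec<[u8; 32]>],            // digests referenced by each .idx file
    store: &mut Vec<([u8; 32], Vec<u8>)>,     // (digest, data) pairs in the store
) {
    // Phase 1: mark every chunk referenced by any index file.
    let mut marked: HashSet<[u8; 32]> = HashSet::new();
    for index in index_files {
        marked.extend(index.iter().copied());
    }
    // Phase 2: sweep everything that was not marked.
    store.retain(|(digest, _)| marked.contains(digest));
}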
@@ -120,6 +120,8 @@ macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {

 /// Unix system user used by proxmox-backup-proxy
 pub const BACKUP_USER_NAME: &str = "backup";
+/// Unix system group used by proxmox-backup-proxy
+pub const BACKUP_GROUP_NAME: &str = "backup";

 /// Return User info for the 'backup' user (``getpwnam_r(3)``)
 pub fn backup_user() -> Result<nix::unistd::User, Error> {
@@ -129,6 +131,14 @@ pub fn backup_user() -> Result<nix::unistd::User, Error> {
     }
 }

+/// Return Group info for the 'backup' group (``getgrnam(3)``)
+pub fn backup_group() -> Result<nix::unistd::Group, Error> {
+    match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
+        Some(group) => Ok(group),
+        None => bail!("Unable to lookup backup user."),
+    }
+}
+
 mod file_formats;
 pub use file_formats::*;
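Editorial note: the new `backup_group()` pairs with `backup_user()` when store directories need to end up owned by backup:backup, as the ChunkStore creation hunk further down does. A sketch of such a caller (the function name is illustrative):

fn chown_to_backup(path: &std::path::Path) -> Result<(), anyhow::Error> {
    let user = backup_user()?;   // getpwnam_r(3) lookup
    let group = backup_group()?; // getgrnam(3) lookup
    // same chown call the ChunkStore code below uses
    nix::unistd::chown(path, Some(user.uid), Some(group.gid))?;
    Ok(())
}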
@@ -1,30 +1,35 @@
 use std::future::Future;
 use std::task::{Poll, Context};
 use std::pin::Pin;
+use std::io::SeekFrom;

 use anyhow::Error;
 use futures::future::FutureExt;
 use futures::ready;
-use tokio::io::AsyncRead;
+use tokio::io::{AsyncRead, AsyncSeek};

 use proxmox::sys::error::io_err_other;
 use proxmox::io_format_err;

 use super::IndexFile;
 use super::read_chunk::AsyncReadChunk;
+use super::index::ChunkReadInfo;

 enum AsyncIndexReaderState<S> {
     NoData,
     WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
-    HaveData(usize),
+    HaveData,
 }

 pub struct AsyncIndexReader<S, I: IndexFile> {
     store: Option<S>,
     index: I,
     read_buffer: Vec<u8>,
+    current_chunk_offset: u64,
     current_chunk_idx: usize,
-    current_chunk_digest: [u8; 32],
+    current_chunk_info: Option<ChunkReadInfo>,
+    position: u64,
+    seek_to_pos: i64,
     state: AsyncIndexReaderState<S>,
 }

@@ -36,17 +41,21 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
         Self {
             store: Some(store),
             index,
-            read_buffer: Vec::with_capacity(1024*1024),
+            read_buffer: Vec::with_capacity(1024 * 1024),
+            current_chunk_offset: 0,
             current_chunk_idx: 0,
-            current_chunk_digest: [0u8; 32],
+            current_chunk_info: None,
+            position: 0,
+            seek_to_pos: 0,
             state: AsyncIndexReaderState::NoData,
         }
     }
 }

-impl<S, I> AsyncRead for AsyncIndexReader<S, I> where
-S: AsyncReadChunk + Unpin + Sync + 'static,
-I: IndexFile + Unpin
+impl<S, I> AsyncRead for AsyncIndexReader<S, I>
+where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin,
 {
     fn poll_read(
         self: Pin<&mut Self>,
@@ -57,53 +66,71 @@ I: IndexFile + Unpin
         loop {
             match &mut this.state {
                 AsyncIndexReaderState::NoData => {
-                    if this.current_chunk_idx >= this.index.index_count() {
+                    let (idx, offset) = if this.current_chunk_info.is_some() &&
+                        this.position == this.current_chunk_info.as_ref().unwrap().range.end
+                    {
+                        // optimization for sequential chunk read
+                        let next_idx = this.current_chunk_idx + 1;
+                        (next_idx, 0)
+                    } else {
+                        match this.index.chunk_from_offset(this.position) {
+                            Some(res) => res,
+                            None => return Poll::Ready(Ok(0))
+                        }
+                    };
+
+                    if idx >= this.index.index_count() {
                         return Poll::Ready(Ok(0));
                     }

-                    let digest = this
+                    let info = this
                         .index
-                        .index_digest(this.current_chunk_idx)
-                        .ok_or(io_format_err!("could not get digest"))?
-                        .clone();
+                        .chunk_info(idx)
+                        .ok_or(io_format_err!("could not get digest"))?;

-                    if digest == this.current_chunk_digest {
-                        this.state = AsyncIndexReaderState::HaveData(0);
-                        continue;
+                    this.current_chunk_offset = offset;
+                    this.current_chunk_idx = idx;
+                    let old_info = this.current_chunk_info.replace(info.clone());
+
+                    if let Some(old_info) = old_info {
+                        if old_info.digest == info.digest {
+                            // hit, chunk is currently in cache
+                            this.state = AsyncIndexReaderState::HaveData;
+                            continue;
+                        }
                     }

-                    this.current_chunk_digest = digest;
+                    // miss, need to download new chunk

                     let store = match this.store.take() {
                         Some(store) => store,
                         None => {
                             return Poll::Ready(Err(io_format_err!("could not find store")));
-                        },
+                        }
                     };

                     let future = async move {
-                        store.read_chunk(&digest)
+                        store.read_chunk(&info.digest)
                             .await
                             .map(move |x| (store, x))
                     };

                     this.state = AsyncIndexReaderState::WaitForData(future.boxed());
-                },
+                }
                 AsyncIndexReaderState::WaitForData(ref mut future) => {
                     match ready!(future.as_mut().poll(cx)) {
                         Ok((store, mut chunk_data)) => {
                             this.read_buffer.clear();
                             this.read_buffer.append(&mut chunk_data);
-                            this.state = AsyncIndexReaderState::HaveData(0);
+                            this.state = AsyncIndexReaderState::HaveData;
                             this.store = Some(store);
-                        },
+                        }
                         Err(err) => {
                             return Poll::Ready(Err(io_err_other(err)));
-                        },
+                        }
                     };
-                },
-                AsyncIndexReaderState::HaveData(offset) => {
-                    let offset = *offset;
+                }
+                AsyncIndexReaderState::HaveData => {
+                    let offset = this.current_chunk_offset as usize;
                     let len = this.read_buffer.len();
                     let n = if len - offset < buf.len() {
                         len - offset
@@ -111,17 +138,67 @@ I: IndexFile + Unpin
                         buf.len()
                     };

-                    buf[0..n].copy_from_slice(&this.read_buffer[offset..offset+n]);
+                    buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
+                    this.position += n as u64;

                     if offset + n == len {
                         this.state = AsyncIndexReaderState::NoData;
-                        this.current_chunk_idx += 1;
                     } else {
-                        this.state = AsyncIndexReaderState::HaveData(offset + n);
+                        this.current_chunk_offset += n as u64;
+                        this.state = AsyncIndexReaderState::HaveData;
                     }

                     return Poll::Ready(Ok(n));
-                },
+                }
             }
         }
     }
 }

+impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
+where
+    S: AsyncReadChunk + Unpin + Sync + 'static,
+    I: IndexFile + Unpin,
+{
+    fn start_seek(
+        self: Pin<&mut Self>,
+        _cx: &mut Context<'_>,
+        pos: SeekFrom,
+    ) -> Poll<tokio::io::Result<()>> {
+        let this = Pin::get_mut(self);
+        this.seek_to_pos = match pos {
+            SeekFrom::Start(offset) => {
+                offset as i64
+            },
+            SeekFrom::End(offset) => {
+                this.index.index_bytes() as i64 + offset
+            },
+            SeekFrom::Current(offset) => {
+                this.position as i64 + offset
+            }
+        };
+        Poll::Ready(Ok(()))
+    }
+
+    fn poll_complete(
+        self: Pin<&mut Self>,
+        _cx: &mut Context<'_>,
+    ) -> Poll<tokio::io::Result<u64>> {
+        let this = Pin::get_mut(self);
+
+        let index_bytes = this.index.index_bytes();
+        if this.seek_to_pos < 0 {
+            return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
+        } else if this.seek_to_pos > index_bytes as i64 {
+            this.position = index_bytes;
+        } else {
+            this.position = this.seek_to_pos as u64;
+        }
+
+        // even if seeking within one chunk, we need to go to NoData to
+        // recalculate the current_chunk_offset (data is cached anyway)
+        this.state = AsyncIndexReaderState::NoData;
+
+        Poll::Ready(Ok(this.position))
+    }
+}
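Editorial note: with `AsyncSeek` implemented, the reader composes with tokio's extension traits. A sketch under the assumption of a tokio version that provides `AsyncSeekExt`/`AsyncReadExt` (io-util feature); the constructor for the concrete reader is elided:

use std::io::SeekFrom;
use tokio::io::{AsyncReadExt, AsyncSeekExt};

// Seek into the middle of an indexed archive and read from there.
async fn read_at<R>(mut reader: R, pos: u64) -> Result<Vec<u8>, std::io::Error>
where
    R: tokio::io::AsyncRead + tokio::io::AsyncSeek + Unpin,
{
    reader.seek(SeekFrom::Start(pos)).await?; // drives start_seek + poll_complete
    let mut buf = vec![0u8; 4096];
    let n = reader.read(&mut buf).await?;     // served from the chunk cache when possible
    buf.truncate(n);
    Ok(buf)
}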
@@ -45,6 +45,31 @@ pub struct BackupGroup {
     backup_id: String,
 }

+impl std::cmp::Ord for BackupGroup {
+
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let type_order = self.backup_type.cmp(&other.backup_type);
+        if type_order != std::cmp::Ordering::Equal {
+            return type_order;
+        }
+        // try to compare IDs numerically
+        let id_self = self.backup_id.parse::<u64>();
+        let id_other = other.backup_id.parse::<u64>();
+        match (id_self, id_other) {
+            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+            _ => self.backup_id.cmp(&other.backup_id),
+        }
+    }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
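Editorial note: the numeric comparison keeps VM IDs in the order an admin expects. A sketch of the effect, assuming `BackupGroup` also carries the `Eq`/`PartialEq` impls that `Ord` requires (the constructor is the `BackupGroup::new` shown in context below):

let mut groups = vec![
    BackupGroup::new("vm", "100"),
    BackupGroup::new("vm", "9"),
    BackupGroup::new("ct", "alpine"), // non-numeric IDs sort after numeric ones
];
// uses the Ord impl above; result: ("ct","alpine"), ("vm","9"), ("vm","100").
// A purely lexical comparison would have put "100" before "9".
groups.sort();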
 impl BackupGroup {

     pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
@@ -106,7 +131,11 @@ impl BackupGroup {

         use nix::fcntl::{openat, OFlag};
         match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
-            Ok(_) => { /* manifest exists --> assume backup was successful */ },
+            Ok(rawfd) => {
+                /* manifest exists --> assume backup was successful */
+                /* close else this leaks! */
+                nix::unistd::close(rawfd)?;
+            },
             Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
             Err(err) => {
                 bail!("last_successful_backup: unexpected error - {}", err);
@@ -169,7 +198,7 @@ impl std::str::FromStr for BackupGroup {
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snaphost.
-#[derive(Debug, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub struct BackupDir {
     /// Backup group
     group: BackupGroup,
@@ -268,9 +297,13 @@ impl BackupInfo {
     }

     /// Finds the latest backup inside a backup group
-    pub fn last_backup(base_path: &Path, group: &BackupGroup) -> Result<Option<BackupInfo>, Error> {
+    pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
+        -> Result<Option<BackupInfo>, Error>
+    {
         let backups = group.list_backups(base_path)?;
-        Ok(backups.into_iter().max_by_key(|item| item.backup_dir.backup_time()))
+        Ok(backups.into_iter()
+            .filter(|item| !only_finished || item.is_finished())
+            .max_by_key(|item| item.backup_dir.backup_time()))
     }

     pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
@@ -313,6 +346,11 @@ impl BackupInfo {
         })?;
         Ok(list)
     }

+    pub fn is_finished(&self) -> bool {
+        // backup is considered unfinished if there is no manifest
+        self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+    }
 }

 fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
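Editorial note: callers now choose whether unfinished snapshots count. A sketch of a caller using the new flag (the wrapper name is illustrative):

// Find the newest *finished* snapshot of a group, skipping any snapshot
// that has no manifest yet (e.g. a backup that is still running).
// Passing `false` instead restores the old "newest of anything" behavior.
fn newest_finished(base: &std::path::Path, group: &BackupGroup)
    -> Result<Option<BackupInfo>, anyhow::Error>
{
    BackupInfo::last_backup(base, group, true)
}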
@@ -3,7 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
 use std::future::Future;
 use std::io::Write;
 use std::mem;
-use std::os::unix::ffi::OsStrExt;
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
 use std::path::{Path, PathBuf};
 use std::pin::Pin;

@@ -89,6 +89,10 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
                 "find",
                 CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
             )
+            .insert(
+                "exit",
+                CliCommand::new(&API_METHOD_EXIT),
+            )
             .insert_help(),
     )
 }
@@ -104,6 +108,14 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
     }
 }

+// just an empty wrapper so that it is displayed in help/docs, we check
+// in the readloop for 'exit' again break
+#[api(input: { properties: {} })]
+/// Exit the shell
+async fn exit() -> Result<(), Error> {
+    Ok(())
+}
+
 #[api(input: { properties: {} })]
 /// List the current working directory.
 async fn pwd_command() -> Result<(), Error> {
@@ -439,6 +451,9 @@ impl Shell {
             SHELL = Some(this as *mut Shell as usize);
         }
         while let Ok(line) = this.rl.readline(&this.prompt) {
+            if line == "exit" {
+                break;
+            }
             let helper = this.rl.helper().unwrap();
             let args = match cli::shellword_split(&line) {
                 Ok(args) => args,
@@ -1058,6 +1073,7 @@ impl<'a> ExtractorState<'a> {
         }
         self.path.extend(&entry.name);

+        self.extractor.set_path(OsString::from_vec(self.path.clone()));
         self.handle_entry(entry).await?;
     }

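Editorial note: the `exit` command above is deliberately a no-op API stub; it exists only so help/docs list it, while the read loop intercepts the word before dispatch. A generic sketch of that intercept-before-dispatch pattern, using plain stdin as a stand-in for the shell's line editor:

use std::io::BufRead;

fn read_loop() {
    let stdin = std::io::stdin();
    for line in stdin.lock().lines() {
        let line = match line { Ok(l) => l, Err(_) => break };
        if line.trim() == "exit" {
            break; // leave the loop instead of dispatching the command
        }
        println!("dispatching: {}", line); // stand-in for the CLI dispatcher
    }
}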
@@ -80,8 +80,9 @@ impl ChunkStore {

         let default_options = CreateOptions::new();

-        if let Err(err) = create_path(&base, Some(default_options.clone()), Some(options.clone())) {
-            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+            Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+            Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
         }

         if let Err(err) = create_dir(&chunk_dir, options.clone()) {
@@ -103,7 +104,7 @@ impl ChunkStore {
             }
             let percentage = (i*100)/(64*1024);
             if percentage != last_percentage {
-                eprintln!("Percentage done: {}", percentage);
+                eprintln!("{}%", percentage);
                 last_percentage = percentage;
             }
         }
@@ -177,28 +178,12 @@ impl ChunkStore {
                 return Ok(false);
             }

-            bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
+            bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
         }

         Ok(true)
     }

-    pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
-
-        let (chunk_path, digest_str) = self.chunk_path(digest);
-        let mut file = std::fs::File::open(&chunk_path)
-            .map_err(|err| {
-                format_err!(
-                    "store '{}', unable to read chunk '{}' - {}",
-                    self.name,
-                    digest_str,
-                    err,
-                )
-            })?;
-
-        DataBlob::load(&mut file)
-    }
-
     pub fn get_chunk_iterator(
         &self,
     ) -> Result<
@@ -290,14 +275,13 @@ impl ChunkStore {
     pub fn sweep_unused_chunks(
         &self,
         oldest_writer: i64,
+        phase1_start_time: i64,
         status: &mut GarbageCollectionStatus,
         worker: &WorkerTask,
     ) -> Result<(), Error> {
         use nix::sys::stat::fstatat;

-        let now = unsafe { libc::time(std::ptr::null_mut()) };
-
-        let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
+        let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)

         if oldest_writer < min_atime {
             min_atime = oldest_writer;
@@ -311,7 +295,7 @@ impl ChunkStore {
         for (entry, percentage) in self.get_chunk_iterator()? {
             if last_percentage != percentage {
                 last_percentage = percentage;
-                worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
+                worker.log(format!("{}%, processed {} chunks", percentage, chunk_count));
             }

             worker.fail_on_abort()?;
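Editorial note: the sweep removes a chunk only when its atime predates a cutoff derived from the phase-1 start time and the oldest still-running writer. A sketch of that decision in isolation:

// May only remove a chunk when its atime is older than both the 24h
// relatime safety window before phase 1 started and the start time of
// the oldest still-running backup writer.
fn may_remove(chunk_atime: i64, phase1_start_time: i64, oldest_writer: i64) -> bool {
    let mut min_atime = phase1_start_time - 3600 * 24; // relatime margin
    if oldest_writer < min_atime {
        min_atime = oldest_writer;
    }
    chunk_atime < min_atime
}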
@@ -5,15 +5,15 @@
 /// use hash value 0 to detect a boundary.
 const CA_CHUNKER_WINDOW_SIZE: usize = 64;

-/// Slinding window chunker (Buzhash)
+/// Sliding window chunker (Buzhash)
 ///
 /// This is a rewrite of *casync* chunker (cachunker.h) in rust.
 ///
 /// Hashing by cyclic polynomial (also called Buzhash) has the benefit
 /// of avoiding multiplications, using barrel shifts instead. For more
 /// information please take a look at the [Rolling
-/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) artikel from
-/// wikipedia.
+/// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from
+/// Wikipedia.

 pub struct Chunker {
     h: u32,
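Editorial note: a minimal Buzhash update step, to illustrate the barrel-shift technique the doc comment refers to. The random table is a stand-in for the chunker's real 256-entry table; only `CA_CHUNKER_WINDOW_SIZE` is taken from the source above:

const WINDOW: usize = 64; // mirrors CA_CHUNKER_WINDOW_SIZE

// Rotate the running hash left one bit, XOR out the leaving byte's table
// value (rotated by the window size; with a 32-bit hash and window 64 the
// rotation is a no-op), XOR in the entering byte's table value.
fn buzhash_roll(h: u32, leaving: u8, entering: u8, table: &[u32; 256]) -> u32 {
    h.rotate_left(1)
        ^ table[leaving as usize].rotate_left((WINDOW % 32) as u32)
        ^ table[entering as usize]
}
// A boundary is declared when selected low bits of `h` match a fixed
// pattern; the doc above uses "hash value 0" under a mask derived from
// the desired average chunk size.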
@@ -6,12 +6,30 @@
 //! See the Wikipedia Artikel for [Authenticated
 //! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
 //! for a short introduction.
-use anyhow::{bail, Error};
-use openssl::pkcs5::pbkdf2_hmac;
-use openssl::hash::MessageDigest;
-use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
 use std::io::Write;

+use anyhow::{bail, Error};
 use chrono::{Local, TimeZone, DateTime};
+use openssl::hash::MessageDigest;
+use openssl::pkcs5::pbkdf2_hmac;
+use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::api;
+
+#[api(default: "encrypt")]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "kebab-case")]
+/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
+pub enum CryptMode {
+    /// Don't encrypt.
+    None,
+    /// Encrypt.
+    Encrypt,
+    /// Only sign.
+    SignOnly,
+}

 /// Encryption Configuration with secret key
 ///
@@ -26,7 +44,6 @@ pub struct CryptConfig {
     id_pkey: openssl::pkey::PKey<openssl::pkey::Private>,
     // The private key used by the cipher.
     enc_key: [u8; 32],
-
 }

 impl CryptConfig {
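Editorial note: the kebab-case rename means `SignOnly` travels as "sign-only" on the wire. A sketch of the serde round-trip:

// #[serde(rename_all = "kebab-case")] above makes the variants serialize
// as "none", "encrypt" and "sign-only".
fn crypt_mode_wire_format() -> Result<(), serde_json::Error> {
    let s = serde_json::to_string(&CryptMode::SignOnly)?;
    assert_eq!(s, "\"sign-only\"");
    let m: CryptMode = serde_json::from_str("\"encrypt\"")?;
    assert_eq!(m, CryptMode::Encrypt);
    Ok(())
}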
@ -63,10 +80,9 @@ impl CryptConfig {
|
|||||||
/// chunk digest values do not clash with values computed for
|
/// chunk digest values do not clash with values computed for
|
||||||
/// other sectret keys.
|
/// other sectret keys.
|
||||||
pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
|
pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
|
||||||
// FIXME: use HMAC-SHA256 instead??
|
|
||||||
let mut hasher = openssl::sha::Sha256::new();
|
let mut hasher = openssl::sha::Sha256::new();
|
||||||
hasher.update(&self.id_key);
|
|
||||||
hasher.update(data);
|
hasher.update(data);
|
||||||
|
hasher.update(&self.id_key); // at the end, to avoid length extensions attacks
|
||||||
hasher.finish()
|
hasher.finish()
|
||||||
}
|
}
|
||||||
|
|
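Editorial note: the reordering above matters because SHA-256 is a Merkle-Damgard hash. With hash(key || data), an attacker who knows the digest can extend it to hash(key || data || pad || more) without knowing the key; hashing the key last closes that avenue. A sketch of the resulting construction:

// Keyed digest with the key appended, as compute_digest now does.
fn keyed_digest(data: &[u8], id_key: &[u8]) -> [u8; 32] {
    let mut hasher = openssl::sha::Sha256::new();
    hasher.update(data);
    hasher.update(id_key); // key last, defeating length extension
    hasher.finish()
}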
@@ -203,7 +219,7 @@ impl CryptConfig {
         created: DateTime<Local>,
     ) -> Result<Vec<u8>, Error> {

         let modified = Local.timestamp(Local::now().timestamp(), 0);
         let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
         let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();

@@ -3,10 +3,10 @@ use std::convert::TryInto;

 use proxmox::tools::io::{ReadExt, WriteExt};

-const MAX_BLOB_SIZE: usize = 128*1024*1024;
-
 use super::file_formats::*;
-use super::CryptConfig;
+use super::{CryptConfig, CryptMode};
+
+const MAX_BLOB_SIZE: usize = 128*1024*1024;

 /// Encoded data chunk with digest and positional information
 pub struct ChunkInfo {
@@ -36,6 +36,11 @@ impl DataBlob {
         &self.raw_data
     }

+    /// Returns raw_data size
+    pub fn raw_size(&self) -> u64 {
+        self.raw_data.len() as u64
+    }
+
     /// Consume self and returns raw_data
     pub fn into_inner(self) -> Vec<u8> {
         self.raw_data
@@ -66,8 +71,8 @@ impl DataBlob {
         hasher.finalize()
     }

-    /// verify the CRC32 checksum
-    pub fn verify_crc(&self) -> Result<(), Error> {
+    // verify the CRC32 checksum
+    fn verify_crc(&self) -> Result<(), Error> {
         let expected_crc = self.compute_crc();
         if expected_crc != self.crc() {
             bail!("Data blob has wrong CRC checksum.");
@@ -166,17 +171,37 @@ impl DataBlob {
         Ok(blob)
     }

+    /// Get the encryption mode for this blob.
+    pub fn crypt_mode(&self) -> Result<CryptMode, Error> {
+        let magic = self.magic();
+
+        Ok(if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 || magic == &COMPRESSED_BLOB_MAGIC_1_0 {
+            CryptMode::None
+        } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+            CryptMode::Encrypt
+        } else {
+            bail!("Invalid blob magic number.");
+        })
+    }
+
     /// Decode blob data
-    pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
+    pub fn decode(&self, config: Option<&CryptConfig>, digest: Option<&[u8; 32]>) -> Result<Vec<u8>, Error> {

         let magic = self.magic();

         if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
             let data_start = std::mem::size_of::<DataBlobHeader>();
-            Ok(self.raw_data[data_start..].to_vec())
+            let data = self.raw_data[data_start..].to_vec();
+            if let Some(digest) = digest {
+                Self::verify_digest(&data, None, digest)?;
+            }
+            Ok(data)
         } else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
             let data_start = std::mem::size_of::<DataBlobHeader>();
             let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
+            if let Some(digest) = digest {
+                Self::verify_digest(&data, None, digest)?;
+            }
             Ok(data)
         } else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
             let header_len = std::mem::size_of::<EncryptedDataBlobHeader>();
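Editorial note: `crypt_mode()` lets callers check a stored blob against the mode they expect before decoding. A sketch of such a guard (the function name and the source of `expected` are assumptions):

// Refuse a blob whose on-disk mode differs from what the client asked
// for, e.g. a plaintext blob where an encrypted one was expected.
fn check_blob_mode(blob: &DataBlob, expected: CryptMode) -> Result<(), anyhow::Error> {
    let actual = blob.crypt_mode()?; // derived from the magic number
    if actual != expected {
        anyhow::bail!("blob has crypt mode {:?}, expected {:?}", actual, expected);
    }
    Ok(())
}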
@@ -190,86 +215,29 @@ impl DataBlob {
                 } else {
                     config.decode_uncompressed_chunk(&self.raw_data[header_len..], &head.iv, &head.tag)?
                 };
+                if let Some(digest) = digest {
+                    Self::verify_digest(&data, Some(config), digest)?;
+                }
                 Ok(data)
             } else {
                 bail!("unable to decrypt blob - missing CryptConfig");
             }
-        } else if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 || magic == &AUTHENTICATED_BLOB_MAGIC_1_0 {
-            let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-            let head = unsafe {
-                (&self.raw_data[..header_len]).read_le_value::<AuthenticatedDataBlobHeader>()?
-            };
-
-            let data_start = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-
-            // Note: only verify if we have a crypt config
-            if let Some(config) = config {
-                let signature = config.compute_auth_tag(&self.raw_data[data_start..]);
-                if signature != head.tag {
-                    bail!("verifying blob signature failed");
-                }
-            }
-
-            if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 {
-                let data = zstd::block::decompress(&self.raw_data[data_start..], 16*1024*1024)?;
-                Ok(data)
-            } else {
-                Ok(self.raw_data[data_start..].to_vec())
-            }
         } else {
             bail!("Invalid blob magic number.");
         }
     }

-    /// Create a signed DataBlob, optionally compressed
-    pub fn create_signed(
-        data: &[u8],
-        config: &CryptConfig,
-        compress: bool,
-    ) -> Result<Self, Error> {
-
-        if data.len() > MAX_BLOB_SIZE {
-            bail!("data blob too large ({} bytes).", data.len());
-        }
-
-        let compr_data;
-        let (_compress, data, magic) = if compress {
-            compr_data = zstd::block::compress(data, 1)?;
-            // Note: We only use compression if result is shorter
-            if compr_data.len() < data.len() {
-                (true, &compr_data[..], AUTH_COMPR_BLOB_MAGIC_1_0)
-            } else {
-                (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
-            }
-        } else {
-            (false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
-        };
-
-        let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
-        let mut raw_data = Vec::with_capacity(data.len() + header_len);
-
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic, crc: [0; 4] },
-            tag: config.compute_auth_tag(data),
-        };
-        unsafe {
-            raw_data.write_le_value(head)?;
-        }
-        raw_data.extend_from_slice(data);
-
-        let mut blob = DataBlob { raw_data };
-        blob.set_crc(blob.compute_crc());
-
-        Ok(blob)
-    }
-
-    /// Load blob from ``reader``
-    pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
-
+    /// Load blob from ``reader``, verify CRC
+    pub fn load_from_reader(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
         let mut data = Vec::with_capacity(1024*1024);
         reader.read_to_end(&mut data)?;

-        Self::from_raw(data)
+        let blob = Self::from_raw(data)?;

+        blob.verify_crc()?;
+
+        Ok(blob)
     }

     /// Create Instance from raw data
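Editorial note: loading now verifies the CRC up front, which is why `verify_crc` could become private above. A sketch of the replacement for the removed `ChunkStore::read_chunk` helper:

// Read a chunk file into a DataBlob; the CRC check happens inside
// load_from_reader, so callers no longer need a separate verify step.
fn read_chunk_file(path: &std::path::Path) -> Result<DataBlob, anyhow::Error> {
    let mut file = std::fs::File::open(path)?;
    DataBlob::load_from_reader(&mut file)
}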
@@ -294,14 +262,6 @@ impl DataBlob {

         let blob = DataBlob { raw_data: data };

-            Ok(blob)
-        } else if magic == AUTH_COMPR_BLOB_MAGIC_1_0 || magic == AUTHENTICATED_BLOB_MAGIC_1_0 {
-            if data.len() < std::mem::size_of::<AuthenticatedDataBlobHeader>() {
-                bail!("authenticated blob too small ({} bytes).", data.len());
-            }
-
-            let blob = DataBlob { raw_data: data };
-
             Ok(blob)
         } else {
             bail!("unable to parse raw blob - wrong magic");
@@ -313,7 +273,7 @@ impl DataBlob {
     /// To do that, we need to decompress data first. Please note that
     /// this is not possible for encrypted chunks. This function simply return Ok
     /// for encrypted chunks.
-    /// Note: This does not call verify_crc
+    /// Note: This does not call verify_crc, because this is usually done in load
     pub fn verify_unencrypted(
         &self,
         expected_chunk_size: usize,
@@ -326,12 +286,26 @@ impl DataBlob {
             return Ok(());
         }

-        let data = self.decode(None)?;
+        // verifies digest!
+        let data = self.decode(None, Some(expected_digest))?;

         if expected_chunk_size != data.len() {
             bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
         }
-        let digest = openssl::sha::sha256(&data);
+
+        Ok(())
+    }
+
+    fn verify_digest(
+        data: &[u8],
+        config: Option<&CryptConfig>,
+        expected_digest: &[u8; 32],
+    ) -> Result<(), Error> {
+
+        let digest = match config {
+            Some(config) => config.compute_digest(data),
+            None => openssl::sha::sha256(&data),
+        };
         if &digest != expected_digest {
             bail!("detected chunk with wrong digest.");
         }
@@ -376,7 +350,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {

     /// Set encryption Configuration
     ///
-    /// If set, chunks are encrypted.
+    /// If set, chunks are encrypted
     pub fn crypt_config(mut self, value: &'b CryptConfig) -> Self {
         if self.digest_computed {
             panic!("unable to set crypt_config after compute_digest().");
@@ -415,12 +389,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
             self.compute_digest();
         }

-        let chunk = DataBlob::encode(
-            self.orig_data,
-            self.config,
-            self.compress,
-        )?;
+        let chunk = DataBlob::encode(self.orig_data, self.config, self.compress)?;

         Ok((chunk, self.digest))
     }
@@ -1,4 +1,4 @@
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use std::sync::Arc;
 use std::io::{Read, BufReader};
 use proxmox::tools::io::ReadExt;
@@ -8,8 +8,6 @@ use super::*;
 enum BlobReaderState<R: Read> {
     Uncompressed { expected_crc: u32, csum_reader: ChecksumReader<R> },
     Compressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
-    Signed { expected_crc: u32, expected_hmac: [u8; 32], csum_reader: ChecksumReader<R> },
-    SignedCompressed { expected_crc: u32, expected_hmac: [u8; 32], decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
     Encrypted { expected_crc: u32, decrypt_reader: CryptReader<BufReader<ChecksumReader<R>>> },
     EncryptedCompressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<CryptReader<BufReader<ChecksumReader<R>>>>> },
 }
@@ -41,40 +39,26 @@ impl <R: Read> DataBlobReader<R> {
                 let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
                 Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
             }
-            AUTHENTICATED_BLOB_MAGIC_1_0 => {
-                let expected_crc = u32::from_le_bytes(head.crc);
-                let mut expected_hmac = [0u8; 32];
-                reader.read_exact(&mut expected_hmac)?;
-                let csum_reader = ChecksumReader::new(reader, config);
-                Ok(Self { state: BlobReaderState::Signed { expected_crc, expected_hmac, csum_reader }})
-            }
-            AUTH_COMPR_BLOB_MAGIC_1_0 => {
-                let expected_crc = u32::from_le_bytes(head.crc);
-                let mut expected_hmac = [0u8; 32];
-                reader.read_exact(&mut expected_hmac)?;
-                let csum_reader = ChecksumReader::new(reader, config);
-
-                let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
-                Ok(Self { state: BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr }})
-            }
             ENCRYPTED_BLOB_MAGIC_1_0 => {
+                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                 let expected_crc = u32::from_le_bytes(head.crc);
                 let mut iv = [0u8; 16];
                 let mut expected_tag = [0u8; 16];
                 reader.read_exact(&mut iv)?;
                 reader.read_exact(&mut expected_tag)?;
                 let csum_reader = ChecksumReader::new(reader, None);
-                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
+                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                 Ok(Self { state: BlobReaderState::Encrypted { expected_crc, decrypt_reader }})
             }
             ENCR_COMPR_BLOB_MAGIC_1_0 => {
+                let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
                 let expected_crc = u32::from_le_bytes(head.crc);
                 let mut iv = [0u8; 16];
                 let mut expected_tag = [0u8; 16];
                 reader.read_exact(&mut iv)?;
                 reader.read_exact(&mut expected_tag)?;
                 let csum_reader = ChecksumReader::new(reader, None);
-                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
+                let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
                 let decompr = zstd::stream::read::Decoder::new(decrypt_reader)?;
                 Ok(Self { state: BlobReaderState::EncryptedCompressed { expected_crc, decompr }})
             }
@@ -99,31 +83,6 @@ impl <R: Read> DataBlobReader<R> {
                 }
                 Ok(reader)
             }
-            BlobReaderState::Signed { csum_reader, expected_crc, expected_hmac } => {
-                let (reader, crc, hmac) = csum_reader.finish()?;
-                if crc != expected_crc {
-                    bail!("blob crc check failed");
-                }
-                if let Some(hmac) = hmac {
-                    if hmac != expected_hmac {
-                        bail!("blob signature check failed");
-                    }
-                }
-                Ok(reader)
-            }
-            BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr } => {
-                let csum_reader = decompr.finish().into_inner();
-                let (reader, crc, hmac) = csum_reader.finish()?;
-                if crc != expected_crc {
-                    bail!("blob crc check failed");
-                }
-                if let Some(hmac) = hmac {
-                    if hmac != expected_hmac {
-                        bail!("blob signature check failed");
-                    }
-                }
-                Ok(reader)
-            }
             BlobReaderState::Encrypted { expected_crc, decrypt_reader } => {
                 let csum_reader = decrypt_reader.finish()?.into_inner();
                 let (reader, crc, _) = csum_reader.finish()?;
@@ -155,12 +114,6 @@ impl <R: Read> Read for DataBlobReader<R> {
             BlobReaderState::Compressed { decompr, .. } => {
                 decompr.read(buf)
             }
-            BlobReaderState::Signed { csum_reader, .. } => {
-                csum_reader.read(buf)
-            }
-            BlobReaderState::SignedCompressed { decompr, .. } => {
-                decompr.read(buf)
-            }
             BlobReaderState::Encrypted { decrypt_reader, .. } => {
                 decrypt_reader.read(buf)
             }
@@ -8,8 +8,6 @@ use super::*;
 enum BlobWriterState<W: Write> {
     Uncompressed { csum_writer: ChecksumWriter<W> },
     Compressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
-    Signed { csum_writer: ChecksumWriter<W> },
-    SignedCompressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
     Encrypted { crypt_writer: CryptWriter<ChecksumWriter<W>> },
     EncryptedCompressed { compr: zstd::stream::write::Encoder<CryptWriter<ChecksumWriter<W>>> },
 }
@@ -42,33 +40,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
         Ok(Self { state: BlobWriterState::Compressed { compr }})
     }

-    pub fn new_signed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
-        writer.seek(SeekFrom::Start(0))?;
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: [0; 4] },
-            tag: [0u8; 32],
-        };
-        unsafe {
-            writer.write_le_value(head)?;
-        }
-        let csum_writer = ChecksumWriter::new(writer, Some(config));
-        Ok(Self { state: BlobWriterState::Signed { csum_writer }})
-    }
-
-    pub fn new_signed_compressed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
-        writer.seek(SeekFrom::Start(0))?;
-        let head = AuthenticatedDataBlobHeader {
-            head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: [0; 4] },
-            tag: [0u8; 32],
-        };
-        unsafe {
-            writer.write_le_value(head)?;
-        }
-        let csum_writer = ChecksumWriter::new(writer, Some(config));
-        let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?;
-        Ok(Self { state: BlobWriterState::SignedCompressed { compr }})
-    }
-
     pub fn new_encrypted(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
         writer.seek(SeekFrom::Start(0))?;
         let head = EncryptedDataBlobHeader {
@@ -129,37 +100,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {

                 Ok(writer)
             }
-            BlobWriterState::Signed { csum_writer } => {
-                let (mut writer, crc, tag) = csum_writer.finish()?;
-
-                let head = AuthenticatedDataBlobHeader {
-                    head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
-                    tag: tag.unwrap(),
-                };
-
-                writer.seek(SeekFrom::Start(0))?;
-                unsafe {
-                    writer.write_le_value(head)?;
-                }
-
-                Ok(writer)
-            }
-            BlobWriterState::SignedCompressed { compr } => {
-                let csum_writer = compr.finish()?;
-                let (mut writer, crc, tag) = csum_writer.finish()?;
-
-                let head = AuthenticatedDataBlobHeader {
-                    head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
-                    tag: tag.unwrap(),
-                };
-
-                writer.seek(SeekFrom::Start(0))?;
-                unsafe {
-                    writer.write_le_value(head)?;
-                }
-
-                Ok(writer)
-            }
             BlobWriterState::Encrypted { crypt_writer } => {
                 let (csum_writer, iv, tag) = crypt_writer.finish()?;
                 let (mut writer, crc, _) = csum_writer.finish()?;
@@ -203,12 +143,6 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
             BlobWriterState::Compressed { ref mut compr } => {
                 compr.write(buf)
             }
-            BlobWriterState::Signed { ref mut csum_writer } => {
-                csum_writer.write(buf)
-            }
-            BlobWriterState::SignedCompressed { ref mut compr } => {
-                compr.write(buf)
-            }
             BlobWriterState::Encrypted { ref mut crypt_writer } => {
                 crypt_writer.write(buf)
             }
@@ -226,13 +160,7 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
             BlobWriterState::Compressed { ref mut compr } => {
                 compr.flush()
             }
-            BlobWriterState::Signed { ref mut csum_writer } => {
-                csum_writer.flush()
-            }
-            BlobWriterState::SignedCompressed { ref mut compr } => {
-                compr.flush()
-            }
             BlobWriterState::Encrypted { ref mut crypt_writer } => {
                 crypt_writer.flush()
             }
             BlobWriterState::EncryptedCompressed { ref mut compr } => {
|
@ -7,6 +7,9 @@ use std::convert::TryFrom;
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||||
|
|
||||||
use super::backup_info::{BackupGroup, BackupDir};
|
use super::backup_info::{BackupGroup, BackupDir};
|
||||||
use super::chunk_store::ChunkStore;
|
use super::chunk_store::ChunkStore;
|
||||||
@ -18,7 +21,9 @@ use super::{DataBlob, ArchiveType, archive_type};
|
|||||||
use crate::config::datastore;
|
use crate::config::datastore;
|
||||||
use crate::server::WorkerTask;
|
use crate::server::WorkerTask;
|
||||||
use crate::tools;
|
use crate::tools;
|
||||||
use crate::api2::types::GarbageCollectionStatus;
|
use crate::tools::format::HumanByte;
|
||||||
|
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
|
||||||
|
use crate::api2::types::{GarbageCollectionStatus, Userid};
|
||||||
|
|
||||||
lazy_static! {
|
lazy_static! {
|
||||||
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
|
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
|
||||||
@ -143,7 +148,7 @@ impl DataStore {
|
|||||||
self.chunk_store.base_path()
|
self.chunk_store.base_path()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Clenaup a backup directory
|
/// Cleanup a backup directory
|
||||||
///
|
///
|
||||||
/// Removes all files not mentioned in the manifest.
|
/// Removes all files not mentioned in the manifest.
|
||||||
pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
|
pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
|
||||||
@ -196,6 +201,8 @@ impl DataStore {
|
|||||||
|
|
||||||
let full_path = self.group_path(backup_group);
|
let full_path = self.group_path(backup_group);
|
||||||
|
|
||||||
|
let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
|
||||||
|
|
||||||
log::info!("removing backup group {:?}", full_path);
|
log::info!("removing backup group {:?}", full_path);
|
||||||
std::fs::remove_dir_all(&full_path)
|
std::fs::remove_dir_all(&full_path)
|
||||||
.map_err(|err| {
|
.map_err(|err| {
|
||||||
@@ -210,10 +217,15 @@ impl DataStore {
     }
 
     /// Remove a backup directory including all content
-    pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> {
+    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
 
         let full_path = self.snapshot_path(backup_dir);
 
+        let _guard;
+        if !force {
+            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
+        }
+
         log::info!("removing backup snapshot {:?}", full_path);
         std::fs::remove_dir_all(&full_path)
             .map_err(|err| {
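
Snapshot removal now takes a non-blocking lock on the directory unless `force` is set. A minimal sketch of such a directory lock on Linux, using the `libc` crate directly (the real `lock_dir_noblock` helper lives in the crate's tools module and its exact implementation is not shown in this diff):

```rust
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;

use anyhow::{bail, format_err, Error};

/// Try to take an exclusive, non-blocking flock on a directory.
/// The returned File keeps the lock alive until it is dropped.
fn try_lock_dir(path: &Path, what: &str, hint: &str) -> Result<File, Error> {
    // Opening a directory read-only is allowed on Linux.
    let file = File::open(path)
        .map_err(|err| format_err!("unable to open {} {:?} - {}", what, path, err))?;

    // LOCK_NB makes flock fail immediately instead of blocking.
    let rc = unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };
    if rc != 0 {
        bail!("unable to acquire lock on {} {:?} - {}", what, path, hint);
    }

    Ok(file)
}
```
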
@@ -245,16 +257,21 @@ impl DataStore {
     /// Returns the backup owner.
     ///
     /// The backup owner is the user who first created the backup group.
-    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
+    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
         let mut full_path = self.base_path();
         full_path.push(backup_group.group_path());
         full_path.push("owner");
         let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
-        Ok(owner.trim_end().to_string()) // remove trailing newline
+        Ok(owner.trim_end().parse()?) // remove trailing newline
     }
 
     /// Set the backup owner.
-    pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
+    pub fn set_owner(
+        &self,
+        backup_group: &BackupGroup,
+        userid: &Userid,
+        force: bool,
+    ) -> Result<(), Error> {
         let mut path = self.base_path();
         path.push(backup_group.group_path());
         path.push("owner");
@@ -278,12 +295,17 @@ impl DataStore {
         Ok(())
     }
 
-    /// Create a backup group if it does not already exists.
+    /// Create (if it does not already exists) and lock a backup group
     ///
     /// And set the owner to 'userid'. If the group already exists, it returns the
     /// current owner (instead of setting the owner).
-    pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {
+    ///
+    /// This also acquires an exclusive lock on the directory and returns the lock guard.
+    pub fn create_locked_backup_group(
+        &self,
+        backup_group: &BackupGroup,
+        userid: &Userid,
+    ) -> Result<(Userid, DirLockGuard), Error> {
         // create intermediate path first:
         let base_path = self.base_path();
 
@@ -296,13 +318,15 @@ impl DataStore {
         // create the last component now
         match std::fs::create_dir(&full_path) {
             Ok(_) => {
+                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                 self.set_owner(backup_group, userid, false)?;
                 let owner = self.get_owner(backup_group)?; // just to be sure
-                Ok(owner)
+                Ok((owner, guard))
             }
             Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
+                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                 let owner = self.get_owner(backup_group)?; // just to be sure
-                Ok(owner)
+                Ok((owner, guard))
             }
             Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
         }
@@ -311,15 +335,20 @@ impl DataStore {
     /// Creates a new backup snapshot inside a BackupGroup
     ///
     /// The BackupGroup directory needs to exist.
-    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
+    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
+        -> Result<(PathBuf, bool, DirLockGuard), Error>
+    {
         let relative_path = backup_dir.relative_path();
         let mut full_path = self.base_path();
         full_path.push(&relative_path);
 
+        let lock = ||
+            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");
+
         match std::fs::create_dir(&full_path) {
-            Ok(_) => Ok((relative_path, true)),
-            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
-            Err(e) => Err(e)
+            Ok(_) => Ok((relative_path, true, lock()?)),
+            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
+            Err(e) => Err(e.into())
         }
     }
 
@@ -339,9 +368,30 @@ impl DataStore {
                 .map(|s| s.starts_with("."))
                 .unwrap_or(false)
         }
+        let handle_entry_err = |err: walkdir::Error| {
+            if let Some(inner) = err.io_error() {
+                let path = err.path().unwrap_or(Path::new(""));
+                match inner.kind() {
+                    io::ErrorKind::PermissionDenied => {
+                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
+                        // a user got file permissions wrong on datastore rsync to new server
+                        if err.depth() > 1 || !path.ends_with("lost+found") {
+                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
+                        }
+                    },
+                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
+                }
+            }
+            Ok(())
+        };
         for entry in walker.filter_entry(|e| !is_hidden(e)) {
-            let path = entry?.into_path();
+            let path = match entry {
+                Ok(entry) => entry.into_path(),
+                Err(err) => {
+                    handle_entry_err(err)?;
+                    continue
+                },
+            };
             if let Ok(archive_type) = archive_type(&path) {
                 if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                     list.push(path);
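
`create_locked_backup_group` above is a create-or-attach pattern: try to create the directory, and on `AlreadyExists` fall back to locking the existing one, so both branches return holding the lock. A condensed, hypothetical sketch of just the control flow (reusing the `try_lock_dir` helper sketched earlier; the real code also reads and sets the owner file):

```rust
use std::fs::File;
use std::io;
use std::path::Path;

use anyhow::{bail, Error};

// Stand-in for the flock helper sketched above.
fn try_lock_dir(_path: &Path, _what: &str, _hint: &str) -> Result<File, Error> {
    unimplemented!()
}

/// Returns (created, lock guard): either branch exits with the lock held.
fn create_or_attach(full_path: &Path) -> Result<(bool, File), Error> {
    match std::fs::create_dir(full_path) {
        // We created it: lock it before any other backup can start in it.
        Ok(_) => Ok((true, try_lock_dir(full_path, "backup group", "another backup is already running")?)),
        // Someone created it earlier: attach by taking the same lock.
        Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
            Ok((false, try_lock_dir(full_path, "backup group", "another backup is already running")?))
        }
        Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
    }
}
```
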
@@ -369,8 +419,8 @@ impl DataStore {
             tools::fail_on_shutdown()?;
             let digest = index.index_digest(pos).unwrap();
             if let Err(err) = self.chunk_store.touch_chunk(digest) {
-                bail!("unable to access chunk {}, required by {:?} - {}",
-                      proxmox::tools::digest_to_hex(digest), file_name, err);
+                worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
+                      proxmox::tools::digest_to_hex(digest), file_name, err));
             }
         }
         Ok(())
@@ -413,9 +463,8 @@ impl DataStore {
 
         let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
 
-        let now = unsafe { libc::time(std::ptr::null_mut()) };
-
-        let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
+        let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
+        let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
 
         let mut gc_status = GarbageCollectionStatus::default();
         gc_status.upid = Some(worker.to_string());
@@ -425,26 +474,26 @@ impl DataStore {
         self.mark_used_chunks(&mut gc_status, &worker)?;
 
         worker.log("Start GC phase2 (sweep unused chunks)");
-        self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
+        self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;
 
-        worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
+        worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
         worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
         if gc_status.pending_bytes > 0 {
-            worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
+            worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
         }
 
-        worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
+        worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));
 
         if gc_status.index_data_bytes > 0 {
-            let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
-            worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
+            let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
+            worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
         }
 
-        worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
+        worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));
 
         if gc_status.disk_chunks > 0 {
             let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
-            worker.log(&format!("Average chunk size: {}", avg_chunk));
+            worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
         }
 
         *self.last_gc_status.lock().unwrap() = gc_status;
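
The GC summary now routes byte counts through `HumanByte`. Its implementation is not part of this diff; a rough equivalent that picks a binary unit and keeps two decimals might look like this (a sketch, not the crate's actual code):

```rust
/// Format a byte count with binary units, e.g. 1536 -> "1.50 KiB".
struct HumanByte(u64);

impl std::fmt::Display for HumanByte {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        const UNITS: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
        let mut value = self.0 as f64;
        let mut unit = 0;
        while value >= 1024.0 && unit + 1 < UNITS.len() {
            value /= 1024.0;
            unit += 1;
        }
        if unit == 0 {
            write!(f, "{} {}", self.0, UNITS[unit])
        } else {
            write!(f, "{:.2} {}", value, UNITS[unit])
        }
    }
}

fn main() {
    assert_eq!(HumanByte(1536).to_string(), "1.50 KiB");
    println!("{}", HumanByte(3 * 1024 * 1024 * 1024)); // 3.00 GiB
}
```
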
@@ -476,27 +525,69 @@ impl DataStore {
         self.chunk_store.insert_chunk(chunk, digest)
     }
 
-    pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
-        let blob = self.chunk_store.read_chunk(digest)?;
-        blob.verify_crc()?;
-        blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
-        Ok(())
-    }
-
-    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
+    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
         let mut path = self.base_path();
         path.push(backup_dir.relative_path());
         path.push(filename);
 
-        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
-        let raw_size = raw_data.len() as u64;
-        let blob = DataBlob::from_raw(raw_data)?;
-        Ok((blob, raw_size))
+        proxmox::try_block!({
+            let mut file = std::fs::File::open(&path)?;
+            DataBlob::load_from_reader(&mut file)
+        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
     }
 
-    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
-        let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+
+        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
+
+        proxmox::try_block!({
+            let mut file = std::fs::File::open(&chunk_path)?;
+            DataBlob::load_from_reader(&mut file)
+        }).map_err(|err| format_err!(
+            "store '{}', unable to load chunk '{}' - {}",
+            self.name(),
+            digest_str,
+            err,
+        ))
+    }
+
+    pub fn load_manifest(
+        &self,
+        backup_dir: &BackupDir,
+    ) -> Result<(BackupManifest, u64), Error> {
+        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        let raw_size = blob.raw_size();
         let manifest = BackupManifest::try_from(blob)?;
         Ok((manifest, raw_size))
     }
+
+    pub fn load_manifest_json(
+        &self,
+        backup_dir: &BackupDir,
+    ) -> Result<Value, Error> {
+        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        // no expected digest available
+        let manifest_data = blob.decode(None, None)?;
+        let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
+        Ok(manifest)
+    }
+
+    pub fn store_manifest(
+        &self,
+        backup_dir: &BackupDir,
+        manifest: Value,
+    ) -> Result<(), Error> {
+        let manifest = serde_json::to_string_pretty(&manifest)?;
+        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
+        let raw_data = blob.raw_data();
+
+        let mut path = self.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(MANIFEST_BLOB_NAME);
+
+        replace_file(&path, raw_data, CreateOptions::new())?;
+
+        Ok(())
+    }
 }
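
Both `load_blob` and `load_chunk` use `proxmox::try_block!` to group several fallible steps so a single `map_err` can attach the path to whatever error escapes. Without the macro, the same shape can be written with an immediately-invoked closure; a sketch:

```rust
use std::path::Path;

use anyhow::{format_err, Error};

fn load_bytes(path: &Path) -> Result<Vec<u8>, Error> {
    // Group the fallible steps in a closure so one map_err can
    // decorate any of their errors with the file path.
    (|| -> Result<Vec<u8>, Error> {
        let data = std::fs::read(path)?;
        // ... parse/validate `data` here ...
        Ok(data)
    })()
    .map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
}
```
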
@@ -11,7 +11,6 @@ use anyhow::{bail, format_err, Error};
 
 use proxmox::tools::io::ReadExt;
 use proxmox::tools::uuid::Uuid;
-use proxmox::tools::vec;
 use proxmox::tools::mmap::Mmap;
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
@@ -41,6 +40,24 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
 // pub data: DynamicIndexHeaderData,
 // }
 
+impl DynamicIndexHeader {
+    /// Convenience method to allocate a zero-initialized header struct.
+    pub fn zeroed() -> Box<Self> {
+        unsafe {
+            Box::from_raw(std::alloc::alloc_zeroed(std::alloc::Layout::new::<Self>()) as *mut Self)
+        }
+    }
+
+    pub fn as_bytes(&self) -> &[u8] {
+        unsafe {
+            std::slice::from_raw_parts(
+                self as *const Self as *const u8,
+                std::mem::size_of::<Self>(),
+            )
+        }
+    }
+}
+
 #[derive(Clone, Debug)]
 #[repr(C)]
 pub struct DynamicEntry {
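
`as_bytes` relies on the header being `#[repr(C)]` with a fixed 4096-byte layout (enforced by the `static_assert_size!` above), so viewing the struct as a raw byte slice is well-defined; the zeroed allocation also guarantees any padding bytes hold defined values. A self-contained sketch of the same idea with a toy header that happens to have no padding:

```rust
#[repr(C)]
struct ToyHeader {
    magic: [u8; 8],
    ctime: u64,
}

/// View a #[repr(C)] value as its raw bytes. Only sound when the layout
/// is fixed and padding (if any) has been initialized, e.g. by zeroing.
fn as_bytes<T>(value: &T) -> &[u8] {
    unsafe {
        std::slice::from_raw_parts(
            value as *const T as *const u8,
            std::mem::size_of::<T>(),
        )
    }
}

fn main() {
    let header = ToyHeader { magic: *b"TOYHDR00", ctime: 0 };
    // The whole fixed-size header can be written to disk in one call.
    assert_eq!(as_bytes(&header).len(), std::mem::size_of::<ToyHeader>());
}
```
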
@@ -216,6 +233,24 @@ impl IndexFile for DynamicIndexReader {
             digest: self.index[pos].digest.clone(),
         })
     }
+
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
+        let end_idx = self.index.len() - 1;
+        let end = self.chunk_end(end_idx);
+        let found_idx = self.binary_search(0, 0, end_idx, end, offset);
+        let found_idx = match found_idx {
+            Ok(i) => i,
+            Err(_) => return None
+        };
+
+        let found_start = if found_idx == 0 {
+            0
+        } else {
+            self.chunk_end(found_idx - 1)
+        };
+
+        Some((found_idx, offset - found_start))
+    }
 }
 
 struct CachedChunk {
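
Dynamic indices store each chunk's cumulative end offset, so locating the chunk for a byte offset is a binary search over those end offsets; the intra-chunk offset is then the distance from the previous chunk's end. A standalone sketch of that mapping using `slice::partition_point` (names invented for the example):

```rust
/// `ends[i]` is the exclusive end offset of chunk i (strictly increasing).
/// Returns (chunk index, offset inside that chunk) for a global offset.
fn chunk_from_offset(ends: &[u64], offset: u64) -> Option<(usize, u64)> {
    if offset >= *ends.last()? {
        return None; // past the end of the archive
    }
    // The first chunk whose end is greater than `offset` contains it.
    let idx = ends.partition_point(|&end| end <= offset);
    let start = if idx == 0 { 0 } else { ends[idx - 1] };
    Some((idx, offset - start))
}

fn main() {
    let ends = [100, 250, 300]; // three chunks: [0,100), [100,250), [250,300)
    assert_eq!(chunk_from_offset(&ends, 0), Some((0, 0)));
    assert_eq!(chunk_from_offset(&ends, 120), Some((1, 20)));
    assert_eq!(chunk_from_offset(&ends, 300), None);
}
```
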
@@ -471,27 +506,16 @@ impl DynamicIndexWriter {
 
         let mut writer = BufWriter::with_capacity(1024 * 1024, file);
 
-        let header_size = std::mem::size_of::<DynamicIndexHeader>();
-
-        // todo: use static assertion when available in rust
-        if header_size != 4096 {
-            panic!("got unexpected header size");
-        }
-
         let ctime = epoch_now_u64()?;
 
         let uuid = Uuid::generate();
 
-        let mut buffer = vec::zeroed(header_size);
-        let header = crate::tools::map_struct_mut::<DynamicIndexHeader>(&mut buffer)?;
-
+        let mut header = DynamicIndexHeader::zeroed();
         header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
         header.ctime = u64::to_le(ctime);
         header.uuid = *uuid.as_bytes();
-        header.index_csum = [0u8; 32];
-
-        writer.write_all(&buffer)?;
+        // header.index_csum = [0u8; 32];
+        writer.write_all(header.as_bytes())?;
 
         let csum = Some(openssl::sha::Sha256::new());
 
@@ -17,12 +17,6 @@ pub const ENCRYPTED_BLOB_MAGIC_1_0: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 2
 // openssl::sha::sha256(b"Proxmox Backup zstd compressed encrypted blob v1.0")[0..8]
 pub const ENCR_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];
 
-//openssl::sha::sha256(b"Proxmox Backup authenticated blob v1.0")[0..8]
-pub const AUTHENTICATED_BLOB_MAGIC_1_0: [u8; 8] = [31, 135, 238, 226, 145, 206, 5, 2];
-
-//openssl::sha::sha256(b"Proxmox Backup zstd compressed authenticated blob v1.0")[0..8]
-pub const AUTH_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [126, 166, 15, 190, 145, 31, 169, 96];
-
 // openssl::sha::sha256(b"Proxmox Backup fixed sized chunk index v1.0")[0..8]
 pub const FIXED_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [47, 127, 65, 237, 145, 253, 15, 205];
 
@@ -50,19 +44,6 @@ pub struct DataBlobHeader {
     pub crc: [u8; 4],
 }
 
-/// Authenticated data blob binary storage format
-///
-/// The ``DataBlobHeader`` for authenticated blobs additionally contains
-/// a 16 byte HMAC tag, followed by the data:
-///
-/// (MAGIC || CRC32 || TAG || Data).
-#[derive(Endian)]
-#[repr(C,packed)]
-pub struct AuthenticatedDataBlobHeader {
-    pub head: DataBlobHeader,
-    pub tag: [u8; 32],
-}
-
 /// Encrypted data blob binary storage format
 ///
 /// The ``DataBlobHeader`` for encrypted blobs additionally contains
@@ -87,8 +68,6 @@ pub fn header_size(magic: &[u8; 8]) -> usize {
         &COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
         &ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
         &ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
-        &AUTHENTICATED_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
-        &AUTH_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
         _ => panic!("unknown blob magic"),
     }
 }
@@ -13,7 +13,6 @@ use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 
-use super::read_chunk::*;
 use super::ChunkInfo;
 
 use proxmox::tools::io::ReadExt;
@@ -146,20 +145,6 @@ impl FixedIndexReader {
         Ok(())
     }
 
-    #[inline]
-    fn chunk_end(&self, pos: usize) -> u64 {
-        if pos >= self.index_length {
-            panic!("chunk index out of range");
-        }
-
-        let end = ((pos + 1) * self.chunk_size) as u64;
-        if end > self.size {
-            self.size
-        } else {
-            end
-        }
-    }
-
     pub fn print_info(&self) {
         println!("Size: {}", self.size);
         println!("ChunkSize: {}", self.chunk_size);
@@ -219,6 +204,17 @@ impl IndexFile for FixedIndexReader {
 
         (csum, chunk_end)
     }
+
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
+        if offset >= self.size {
+            return None;
+        }
+
+        Some((
+            (offset / self.chunk_size as u64) as usize,
+            offset & (self.chunk_size - 1) as u64 // fast modulo, valid for 2^x chunk_size
+        ))
+    }
 }
 
 pub struct FixedIndexWriter {
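
The `offset & (chunk_size - 1)` expression above is a fast modulo that is only valid when `chunk_size` is a power of two: subtracting one turns the single set bit into a mask selecting the position inside the chunk. A small demonstration:

```rust
fn main() {
    let chunk_size: u64 = 4 * 1024 * 1024; // must be a power of two
    assert!(chunk_size.is_power_of_two());

    let offset: u64 = 9_000_000;
    // For powers of two, masking the low bits equals the remainder.
    assert_eq!(offset & (chunk_size - 1), offset % chunk_size);
    assert_eq!(offset / chunk_size, 2); // chunk index

    // With a non-power-of-two divisor the identity breaks:
    let odd: u64 = 3_000_000;
    assert_ne!(offset & (odd - 1), offset % odd);
}
```
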
@@ -465,142 +461,3 @@ impl FixedIndexWriter {
         Ok(())
     }
 }
-
-pub struct BufferedFixedReader<S> {
-    store: S,
-    index: FixedIndexReader,
-    archive_size: u64,
-    read_buffer: Vec<u8>,
-    buffered_chunk_idx: usize,
-    buffered_chunk_start: u64,
-    read_offset: u64,
-}
-
-impl<S: ReadChunk> BufferedFixedReader<S> {
-    pub fn new(index: FixedIndexReader, store: S) -> Self {
-        let archive_size = index.size;
-        Self {
-            store,
-            index,
-            archive_size,
-            read_buffer: Vec::with_capacity(1024 * 1024),
-            buffered_chunk_idx: 0,
-            buffered_chunk_start: 0,
-            read_offset: 0,
-        }
-    }
-
-    pub fn archive_size(&self) -> u64 {
-        self.archive_size
-    }
-
-    fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
-        let index = &self.index;
-        let info = match index.chunk_info(idx) {
-            Some(info) => info,
-            None => bail!("chunk index out of range"),
-        };
-
-        // fixme: avoid copy
-
-        let data = self.store.read_chunk(&info.digest)?;
-        let size = info.range.end - info.range.start;
-        if size != data.len() as u64 {
-            bail!("read chunk with wrong size ({} != {}", size, data.len());
-        }
-
-        self.read_buffer.clear();
-        self.read_buffer.extend_from_slice(&data);
-
-        self.buffered_chunk_idx = idx;
-
-        self.buffered_chunk_start = info.range.start as u64;
-        Ok(())
-    }
-}
-
-impl<S: ReadChunk> crate::tools::BufferedRead for BufferedFixedReader<S> {
-    fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
-        if offset == self.archive_size {
-            return Ok(&self.read_buffer[0..0]);
-        }
-
-        let buffer_len = self.read_buffer.len();
-        let index = &self.index;
-
-        // optimization for sequential read
-        if buffer_len > 0
-            && ((self.buffered_chunk_idx + 1) < index.index_length)
-            && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let next_idx = self.buffered_chunk_idx + 1;
-            let next_end = index.chunk_end(next_idx);
-            if offset < next_end {
-                self.buffer_chunk(next_idx)?;
-                let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-                return Ok(&self.read_buffer[buffer_offset..]);
-            }
-        }
-
-        if (buffer_len == 0)
-            || (offset < self.buffered_chunk_start)
-            || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
-        {
-            let idx = (offset / index.chunk_size as u64) as usize;
-            self.buffer_chunk(idx)?;
-        }
-
-        let buffer_offset = (offset - self.buffered_chunk_start) as usize;
-        Ok(&self.read_buffer[buffer_offset..])
-    }
-}
-
-impl<S: ReadChunk> std::io::Read for BufferedFixedReader<S> {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
-        use crate::tools::BufferedRead;
-        use std::io::{Error, ErrorKind};
-
-        let data = match self.buffered_read(self.read_offset) {
-            Ok(v) => v,
-            Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
-        };
-
-        let n = if data.len() > buf.len() {
-            buf.len()
-        } else {
-            data.len()
-        };
-
-        unsafe {
-            std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
-        }
-
-        self.read_offset += n as u64;
-
-        Ok(n)
-    }
-}
-
-impl<S: ReadChunk> Seek for BufferedFixedReader<S> {
-    fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
-        let new_offset = match pos {
-            SeekFrom::Start(start_offset) => start_offset as i64,
-            SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
-            SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
-        };
-
-        use std::io::{Error, ErrorKind};
-        if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
-            return Err(Error::new(
-                ErrorKind::Other,
-                format!(
-                    "seek is out of range {} ([0..{}])",
-                    new_offset, self.archive_size
-                ),
-            ));
-        }
-        self.read_offset = new_offset as u64;
-
-        Ok(self.read_offset)
-    }
-}
@@ -1,6 +1,7 @@
 use std::collections::HashMap;
 use std::ops::Range;
 
+#[derive(Clone)]
 pub struct ChunkReadInfo {
     pub range: Range<u64>,
     pub digest: [u8; 32],
@@ -22,6 +23,9 @@ pub trait IndexFile {
     fn index_bytes(&self) -> u64;
     fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
 
+    /// Get the chunk index and the relative offset within it for a byte offset
+    fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;
+
     /// Compute index checksum and size
     fn compute_csum(&self) -> ([u8; 32], u64);
 
@@ -1,4 +1,4 @@
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
 
 use serde::{Deserialize, Serialize};
 use chrono::{Local, TimeZone, DateTime};
@@ -146,12 +146,26 @@ pub fn encrypt_key_with_passphrase(
     })
 }
 
-pub fn load_and_decrypt_key(path: &std::path::Path, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>) -> Result<([u8;32], DateTime<Local>), Error> {
+pub fn load_and_decrypt_key(
+    path: &std::path::Path,
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    do_load_and_decrypt_key(path, passphrase)
+        .with_context(|| format!("failed to load decryption key from {:?}", path))
+}
 
-    let raw = file_get_contents(&path)?;
-    let data = String::from_utf8(raw)?;
+fn do_load_and_decrypt_key(
+    path: &std::path::Path,
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    decrypt_key(&file_get_contents(&path)?, passphrase)
+}
 
-    let key_config: KeyConfig = serde_json::from_str(&data)?;
+pub fn decrypt_key(
+    mut keydata: &[u8],
+    passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
+) -> Result<([u8;32], DateTime<Local>), Error> {
+    let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;
 
     let raw_data = key_config.data;
     let created = key_config.created;
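
The public wrapper above uses anyhow's `Context` to prepend the key path to any error escaping the inner function: keep the fallible logic in a private helper and decorate once at the boundary. A minimal sketch of the same pattern:

```rust
use std::path::Path;

use anyhow::{Context, Error};

pub fn load_config(path: &Path) -> Result<String, Error> {
    // One .with_context at the boundary covers every error source below.
    do_load_config(path)
        .with_context(|| format!("failed to load config from {:?}", path))
}

fn do_load_config(path: &Path) -> Result<String, Error> {
    let raw = std::fs::read(path)?;  // io::Error
    Ok(String::from_utf8(raw)?)      // FromUtf8Error
}
```
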
@@ -3,22 +3,76 @@ use std::convert::TryFrom;
 use std::path::Path;
 
 use serde_json::{json, Value};
+use ::serde::{Deserialize, Serialize};
 
-use crate::backup::BackupDir;
+use crate::backup::{BackupDir, CryptMode, CryptConfig};
 
 pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
 pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
 
+mod hex_csum {
+    use serde::{self, Deserialize, Serializer, Deserializer};
+
+    pub fn serialize<S>(
+        csum: &[u8; 32],
+        serializer: S,
+    ) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let s = proxmox::tools::digest_to_hex(csum);
+        serializer.serialize_str(&s)
+    }
+
+    pub fn deserialize<'de, D>(
+        deserializer: D,
+    ) -> Result<[u8; 32], D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
+    }
+}
+
+fn crypt_mode_none() -> CryptMode { CryptMode::None }
+fn empty_value() -> Value { json!({}) }
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
 pub struct FileInfo {
     pub filename: String,
-    pub encrypted: Option<bool>,
+    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
+    pub crypt_mode: CryptMode,
     pub size: u64,
+    #[serde(with = "hex_csum")]
     pub csum: [u8; 32],
 }
 
+impl FileInfo {
+
+    /// Return expected CryptMode of referenced chunks
+    ///
+    /// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
+    /// should only reference plain chunks.
+    pub fn chunk_crypt_mode (&self) -> CryptMode {
+        match self.crypt_mode {
+            CryptMode::Encrypt => CryptMode::Encrypt,
+            CryptMode::SignOnly | CryptMode::None => CryptMode::None,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
 pub struct BackupManifest {
-    snapshot: BackupDir,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
     files: Vec<FileInfo>,
+    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
+    pub unprotected: Value,
+    pub signature: Option<String>,
 }
 
 #[derive(PartialEq)]
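
The `hex_csum` module above is the standard serde pattern for custom field encodings: `#[serde(with = "...")]` routes one field through module-level `serialize`/`deserialize` functions, here turning a 32-byte digest into a hex string. A self-contained sketch of the same mechanism, using the `hex` crate instead of the proxmox helpers (assumed dependencies: `serde` with derive, `serde_json`, `hex`):

```rust
use serde::{Deserialize, Serialize};

mod hex_bytes {
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(bytes: &[u8; 4], s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(&hex::encode(bytes))
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<[u8; 4], D::Error> {
        let s = String::deserialize(d)?;
        let v = hex::decode(&s).map_err(serde::de::Error::custom)?;
        v.try_into().map_err(|_| serde::de::Error::custom("expected 4 bytes"))
    }
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Record {
    #[serde(with = "hex_bytes")]
    csum: [u8; 4],
}

fn main() {
    let r = Record { csum: [0xde, 0xad, 0xbe, 0xef] };
    let json = serde_json::to_string(&r).unwrap();
    assert_eq!(json, r#"{"csum":"deadbeef"}"#);
    assert_eq!(serde_json::from_str::<Record>(&json).unwrap(), r);
}
```
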
@@ -46,12 +100,19 @@ pub fn archive_type<P: AsRef<Path>>(
 impl BackupManifest {
 
     pub fn new(snapshot: BackupDir) -> Self {
-        Self { files: Vec::new(), snapshot }
+        Self {
+            backup_type: snapshot.group().backup_type().into(),
+            backup_id: snapshot.group().backup_id().into(),
+            backup_time: snapshot.backup_time().timestamp(),
+            files: Vec::new(),
+            unprotected: json!({}),
+            signature: None,
+        }
     }
 
-    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
+    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
         let _archive_type = archive_type(&filename)?; // check type
-        self.files.push(FileInfo { filename, size, csum, encrypted });
+        self.files.push(FileInfo { filename, size, csum, crypt_mode });
         Ok(())
     }
 
@@ -59,7 +120,7 @@ impl BackupManifest {
         &self.files[..]
     }
 
-    fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
+    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
 
         let info = self.files.iter().find(|item| item.filename == name);
 
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn into_json(self) -> Value {
|
// Generate canonical json
|
||||||
json!({
|
fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
|
||||||
"backup-type": self.snapshot.group().backup_type(),
|
let mut data = Vec::new();
|
||||||
"backup-id": self.snapshot.group().backup_id(),
|
Self::write_canonical_json(value, &mut data)?;
|
||||||
"backup-time": self.snapshot.backup_time().timestamp(),
|
Ok(data)
|
||||||
"files": self.files.iter()
|
|
||||||
.fold(Vec::new(), |mut acc, info| {
|
|
||||||
let mut value = json!({
|
|
||||||
"filename": info.filename,
|
|
||||||
"encrypted": info.encrypted,
|
|
||||||
"size": info.size,
|
|
||||||
"csum": proxmox::tools::digest_to_hex(&info.csum),
|
|
||||||
});
|
|
||||||
|
|
||||||
if let Some(encrypted) = info.encrypted {
|
|
||||||
value["encrypted"] = encrypted.into();
|
|
||||||
}
|
|
||||||
|
|
||||||
acc.push(value);
|
|
||||||
acc
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn write_canonical_json(value: &Value, output: &mut Vec<u8>) -> Result<(), Error> {
|
||||||
|
match value {
|
||||||
|
Value::Null => bail!("got unexpected null value"),
|
||||||
|
Value::String(_) | Value::Number(_) | Value::Bool(_) => {
|
||||||
|
serde_json::to_writer(output, &value)?;
|
||||||
|
}
|
||||||
|
Value::Array(list) => {
|
||||||
|
output.push(b'[');
|
||||||
|
let mut iter = list.iter();
|
||||||
|
if let Some(item) = iter.next() {
|
||||||
|
Self::write_canonical_json(item, output)?;
|
||||||
|
for item in iter {
|
||||||
|
output.push(b',');
|
||||||
|
Self::write_canonical_json(item, output)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
output.push(b']');
|
||||||
|
}
|
||||||
|
Value::Object(map) => {
|
||||||
|
output.push(b'{');
|
||||||
|
let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
|
||||||
|
keys.sort();
|
||||||
|
let mut iter = keys.into_iter();
|
||||||
|
if let Some(key) = iter.next() {
|
||||||
|
serde_json::to_writer(&mut *output, &key)?;
|
||||||
|
output.push(b':');
|
||||||
|
Self::write_canonical_json(&map[key], output)?;
|
||||||
|
for key in iter {
|
||||||
|
output.push(b',');
|
||||||
|
serde_json::to_writer(&mut *output, &key)?;
|
||||||
|
output.push(b':');
|
||||||
|
Self::write_canonical_json(&map[key], output)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
output.push(b'}');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Compute manifest signature
|
||||||
|
///
|
||||||
|
/// By generating a HMAC SHA256 over the canonical json
|
||||||
|
/// representation, The 'unpreotected' property is excluded.
|
||||||
|
pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
|
||||||
|
Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
|
||||||
|
|
||||||
|
let mut signed_data = data.clone();
|
||||||
|
|
||||||
|
signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
|
||||||
|
signed_data.as_object_mut().unwrap().remove("signature"); // exclude
|
||||||
|
|
||||||
|
let canonical = Self::to_canonical_json(&signed_data)?;
|
||||||
|
|
||||||
|
let sig = crypt_config.compute_auth_tag(&canonical);
|
||||||
|
|
||||||
|
Ok(sig)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts the Manifest into json string, and add a signature if there is a crypt_config.
|
||||||
|
pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
|
||||||
|
|
||||||
|
let mut manifest = serde_json::to_value(&self)?;
|
||||||
|
|
||||||
|
if let Some(crypt_config) = crypt_config {
|
||||||
|
let sig = self.signature(crypt_config)?;
|
||||||
|
manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
|
||||||
|
}
|
||||||
|
|
||||||
|
let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
|
||||||
|
Ok(manifest)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Try to read the manifest. This verifies the signature if there is a crypt_config.
|
||||||
|
pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
|
||||||
|
let json: Value = serde_json::from_slice(data)?;
|
||||||
|
let signature = json["signature"].as_str().map(String::from);
|
||||||
|
|
||||||
|
if let Some(ref crypt_config) = crypt_config {
|
||||||
|
if let Some(signature) = signature {
|
||||||
|
let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
|
||||||
|
if signature != expected_signature {
|
||||||
|
bail!("wrong signature in manifest");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// not signed: warn/fail?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let manifest: BackupManifest = serde_json::from_value(json)?;
|
||||||
|
Ok(manifest)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl TryFrom<super::DataBlob> for BackupManifest {
|
impl TryFrom<super::DataBlob> for BackupManifest {
|
||||||
type Error = Error;
|
type Error = Error;
|
||||||
|
|
||||||
fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
|
fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
|
||||||
let data = blob.decode(None)
|
// no expected digest available
|
||||||
|
let data = blob.decode(None, None)
|
||||||
.map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
|
.map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
|
||||||
let json: Value = serde_json::from_slice(&data[..])
|
let json: Value = serde_json::from_slice(&data[..])
|
||||||
.map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
|
.map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
|
||||||
BackupManifest::try_from(json)
|
let manifest: BackupManifest = serde_json::from_value(json)?;
|
||||||
|
Ok(manifest)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<Value> for BackupManifest {
|
|
||||||
type Error = Error;
|
|
||||||
|
|
||||||
fn try_from(data: Value) -> Result<Self, Error> {
|
#[test]
|
||||||
|
fn test_manifest_signature() -> Result<(), Error> {
|
||||||
|
|
||||||
use crate::tools::{required_string_property, required_integer_property, required_array_property};
|
use crate::backup::{KeyDerivationConfig};
|
||||||
|
|
||||||
proxmox::try_block!({
|
let pw = b"test";
|
||||||
let backup_type = required_string_property(&data, "backup-type")?;
|
|
||||||
let backup_id = required_string_property(&data, "backup-id")?;
|
|
||||||
let backup_time = required_integer_property(&data, "backup-time")?;
|
|
||||||
|
|
||||||
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
|
let kdf = KeyDerivationConfig::Scrypt {
|
||||||
|
n: 65536,
|
||||||
|
r: 8,
|
||||||
|
p: 1,
|
||||||
|
salt: Vec::new(),
|
||||||
|
};
|
||||||
|
|
||||||
let mut manifest = BackupManifest::new(snapshot);
|
let testkey = kdf.derive_key(pw)?;
|
||||||
|
|
||||||
for item in required_array_property(&data, "files")?.iter() {
|
let crypt_config = CryptConfig::new(testkey)?;
|
||||||
let filename = required_string_property(item, "filename")?.to_owned();
|
|
||||||
let csum = required_string_property(item, "csum")?;
|
|
||||||
let csum = proxmox::tools::hex_to_digest(csum)?;
|
|
||||||
let size = required_integer_property(item, "size")? as u64;
|
|
||||||
let encrypted = item["encrypted"].as_bool();
|
|
||||||
manifest.add_file(filename, size, csum, encrypted)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if manifest.files().is_empty() {
|
let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
|
||||||
bail!("manifest does not list any files.");
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(manifest)
|
let mut manifest = BackupManifest::new(snapshot);
|
||||||
}).map_err(|err: Error| format_err!("unable to parse backup manifest - {}", err))
|
|
||||||
|
|
||||||
}
|
manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
|
||||||
|
manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
|
||||||
|
|
||||||
|
manifest.unprotected["note"] = "This is not protected by the signature.".into();
|
||||||
|
|
||||||
|
let text = manifest.to_string(Some(&crypt_config))?;
|
||||||
|
|
||||||
|
let manifest: Value = serde_json::from_str(&text)?;
|
||||||
|
let signature = manifest["signature"].as_str().unwrap().to_string();
|
||||||
|
|
||||||
|
assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");
|
||||||
|
|
||||||
|
let manifest: BackupManifest = serde_json::from_value(manifest)?;
|
||||||
|
let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);
|
||||||
|
|
||||||
|
assert_eq!(signature, expected_signature);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
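
Signing JSON only works if both sides serialize it byte-identically, which is why `write_canonical_json` emits object keys in sorted order with no whitespace before the HMAC is computed. A small demonstration with a simplified canonicalizer following similar rules (note serde_json's default `Map` already sorts keys, but pretty-printed output still varies in whitespace, so a byte-exact canonical form is needed anyway):

```rust
use serde_json::{json, Value};

// Simplified canonical serializer: sorted keys, no whitespace.
fn canonical(value: &Value, out: &mut Vec<u8>) {
    match value {
        Value::Array(list) => {
            out.push(b'[');
            for (i, item) in list.iter().enumerate() {
                if i > 0 { out.push(b','); }
                canonical(item, out);
            }
            out.push(b']');
        }
        Value::Object(map) => {
            let mut keys: Vec<&String> = map.keys().collect();
            keys.sort();
            out.push(b'{');
            for (i, key) in keys.iter().enumerate() {
                if i > 0 { out.push(b','); }
                serde_json::to_writer(&mut *out, key).unwrap();
                out.push(b':');
                canonical(&map[key.as_str()], out);
            }
            out.push(b'}');
        }
        other => serde_json::to_writer(&mut *out, other).unwrap(),
    }
}

fn main() {
    let a = json!({"size": 200, "filename": "abc.blob"});
    let b = json!({"filename": "abc.blob", "size": 200});
    let (mut ca, mut cb) = (Vec::new(), Vec::new());
    canonical(&a, &mut ca);
    canonical(&b, &mut cb);
    // Same canonical bytes, so an HMAC over them matches as well.
    assert_eq!(ca, cb);
}
```
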
@@ -53,7 +53,7 @@ fn remove_incomplete_snapshots(
     let mut keep_unfinished = true;
     for info in list.iter() {
         // backup is considered unfinished if there is no manifest
-        if info.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME) {
+        if info.is_finished() {
             // There is a new finished backup, so there is no need
             // to keep older unfinished backups.
             keep_unfinished = false;
@@ -2,9 +2,9 @@ use std::future::Future;
 use std::pin::Pin;
 use std::sync::Arc;
 
-use anyhow::Error;
+use anyhow::{bail, Error};
 
-use super::crypt_config::CryptConfig;
+use super::crypt_config::{CryptConfig, CryptMode};
 use super::data_blob::DataBlob;
 use super::datastore::DataStore;
 
@@ -21,33 +21,47 @@ pub trait ReadChunk {
 pub struct LocalChunkReader {
     store: Arc<DataStore>,
     crypt_config: Option<Arc<CryptConfig>>,
+    crypt_mode: CryptMode,
 }
 
 impl LocalChunkReader {
-    pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
+    pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>, crypt_mode: CryptMode) -> Self {
         Self {
             store,
             crypt_config,
+            crypt_mode,
+        }
+    }
+
+    fn ensure_crypt_mode(&self, chunk_mode: CryptMode) -> Result<(), Error> {
+        match self.crypt_mode {
+            CryptMode::Encrypt => {
+                match chunk_mode {
+                    CryptMode::Encrypt => Ok(()),
+                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
+                }
+            },
+            CryptMode::SignOnly | CryptMode::None => {
+                match chunk_mode {
+                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
+                    CryptMode::SignOnly | CryptMode::None => Ok(()),
+                }
+            },
         }
     }
 }
 
 impl ReadChunk for LocalChunkReader {
     fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
-        let (path, _) = self.store.chunk_path(digest);
-        let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
-        let chunk = DataBlob::from_raw(raw_data)?;
-        chunk.verify_crc()?;
+        let chunk = self.store.load_chunk(digest)?;
+        self.ensure_crypt_mode(chunk.crypt_mode()?)?;
 
         Ok(chunk)
     }
 
     fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
         let chunk = ReadChunk::read_raw_chunk(self, digest)?;
 
-        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-
-        // fixme: verify digest?
+        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
 
         Ok(raw_data)
     }
@@ -76,8 +90,9 @@ impl AsyncReadChunk for LocalChunkReader {
             let (path, _) = self.store.chunk_path(digest);
 
             let raw_data = tokio::fs::read(&path).await?;
-            let chunk = DataBlob::from_raw(raw_data)?;
-            chunk.verify_crc()?;
+
+            let chunk = DataBlob::load_from_reader(&mut &raw_data[..])?;
+            self.ensure_crypt_mode(chunk.crypt_mode()?)?;
 
             Ok(chunk)
         })
@@ -90,7 +105,7 @@ impl AsyncReadChunk for LocalChunkReader {
         Box::pin(async move {
             let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
 
-            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
+            let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
 
             // fixme: verify digest?
 
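
`ensure_crypt_mode` treats `SignOnly` and `None` interchangeably: both mean the chunk data is stored in plaintext, so the effective distinction is encrypted vs. not. The nested match can therefore be collapsed to one comparison; a sketch, with a local stand-in for the `CryptMode` enum:

```rust
use anyhow::{bail, Error};

#[derive(Clone, Copy, PartialEq)]
enum CryptMode { None, SignOnly, Encrypt }

/// SignOnly chunks are stored in plaintext, so treat them like None.
fn is_encrypted(mode: CryptMode) -> bool {
    mode == CryptMode::Encrypt
}

fn ensure_crypt_mode(index_mode: CryptMode, chunk_mode: CryptMode) -> Result<(), Error> {
    if is_encrypted(index_mode) != is_encrypted(chunk_mode) {
        bail!("Index and chunk CryptMode don't match.");
    }
    Ok(())
}

fn main() {
    assert!(ensure_crypt_mode(CryptMode::SignOnly, CryptMode::None).is_ok());
    assert!(ensure_crypt_mode(CryptMode::Encrypt, CryptMode::None).is_err());
}
```
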
@@ -1,58 +1,123 @@
-use anyhow::{bail, Error};
+use std::collections::HashSet;
+
+use anyhow::{bail, format_err, Error};
 
 use crate::server::WorkerTask;
+use crate::api2::types::*;
 
 use super::{
     DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
-    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
+    CryptMode,
     FileInfo, ArchiveType, archive_type,
 };
 
 fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
 
-    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
+    let blob = datastore.load_blob(backup_dir, &info.filename)?;
 
-    let csum = openssl::sha::sha256(blob.raw_data());
+    let raw_size = blob.raw_size();
     if raw_size != info.size {
         bail!("wrong size ({} != {})", info.size, raw_size);
     }
 
+    let csum = openssl::sha::sha256(blob.raw_data());
     if csum != info.csum {
         bail!("wrong index checksum");
     }
 
-    blob.verify_crc()?;
-
-    let magic = blob.magic();
-
-    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
-        return Ok(());
-    }
-
-    blob.decode(None)?;
-
-    Ok(())
+    match blob.crypt_mode()? {
+        CryptMode::Encrypt => Ok(()),
+        CryptMode::None => {
+            // digest already verified above
+            blob.decode(None, None)?;
+            Ok(())
+        },
+        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
+    }
 }
 
 fn verify_index_chunks(
     datastore: &DataStore,
     index: Box<dyn IndexFile>,
+    verified_chunks: &mut HashSet<[u8;32]>,
+    corrupt_chunks: &mut HashSet<[u8; 32]>,
+    crypt_mode: CryptMode,
     worker: &WorkerTask,
 ) -> Result<(), Error> {
 
+    let mut errors = 0;
     for pos in 0..index.index_count() {
 
         worker.fail_on_abort()?;
 
         let info = index.chunk_info(pos).unwrap();
 
+        if verified_chunks.contains(&info.digest) {
+            continue; // already verified
+        }
+
+        if corrupt_chunks.contains(&info.digest) {
+            let digest_str = proxmox::tools::digest_to_hex(&info.digest);
+            worker.log(format!("chunk {} was marked as corrupt", digest_str));
+            errors += 1;
+            continue;
+        }
+
+        let chunk = match datastore.load_chunk(&info.digest) {
+            Err(err) => {
+                corrupt_chunks.insert(info.digest);
+                worker.log(format!("can't verify chunk, load failed - {}", err));
+                errors += 1;
+                continue;
+            },
+            Ok(chunk) => chunk,
+        };
+
+        let chunk_crypt_mode = match chunk.crypt_mode() {
+            Err(err) => {
+                corrupt_chunks.insert(info.digest);
+                worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
+                errors += 1;
+                continue;
+            },
+            Ok(mode) => mode,
+        };
+
+        if chunk_crypt_mode != crypt_mode {
+            worker.log(format!(
+                "chunk CryptMode {:?} does not match index CryptMode {:?}",
+                chunk_crypt_mode,
+                crypt_mode
+            ));
+            errors += 1;
+        }
+
         let size = info.range.end - info.range.start;
-        datastore.verify_stored_chunk(&info.digest, size)?;
+
+        if let Err(err) = chunk.verify_unencrypted(size as usize, &info.digest) {
+            corrupt_chunks.insert(info.digest);
+            worker.log(format!("{}", err));
+            errors += 1;
+        } else {
+            verified_chunks.insert(info.digest);
+        }
+    }
+
+    if errors > 0 {
+        bail!("chunks could not be verified");
     }
 
     Ok(())
 }
 
-fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_fixed_index(
+    datastore: &DataStore,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: &mut HashSet<[u8;32]>,
+    corrupt_chunks: &mut HashSet<[u8;32]>,
+    worker: &WorkerTask,
+) -> Result<(), Error> {
+
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);
@@ -68,10 +133,18 @@ fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &File
         bail!("wrong index checksum");
     }
 
-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }
 
-fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_dynamic_index(
+    datastore: &DataStore,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: &mut HashSet<[u8;32]>,
+    corrupt_chunks: &mut HashSet<[u8;32]>,
+    worker: &WorkerTask,
+) -> Result<(), Error> {
+
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);
 
@@ -86,7 +159,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
         bail!("wrong index checksum");
     }
 
-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }
 
 /// Verify a single backup snapshot
@@ -98,9 +171,15 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
 /// - Ok(true) if verify is successful
 /// - Ok(false) if there were verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_dir(
+    datastore: &DataStore,
+    backup_dir: &BackupDir,
+    verified_chunks: &mut HashSet<[u8;32]>,
+    corrupt_chunks: &mut HashSet<[u8;32]>,
+    worker: &WorkerTask
+) -> Result<bool, Error> {
 
-    let manifest = match datastore.load_manifest(&backup_dir) {
+    let mut manifest = match datastore.load_manifest(&backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
             worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
@@ -112,12 +191,29 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
 
     let mut error_count = 0;
 
+    let mut verify_result = "ok";
     for info in manifest.files() {
         let result = proxmox::try_block!({
            worker.log(format!("  check {}", info.filename));
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
-                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
+                ArchiveType::FixedIndex =>
+                    verify_fixed_index(
+                        &datastore,
+                        &backup_dir,
+                        info,
+                        verified_chunks,
+                        corrupt_chunks,
+                        worker
+                    ),
+                ArchiveType::DynamicIndex =>
+                    verify_dynamic_index(
+                        &datastore,
+                        &backup_dir,
+                        info,
+                        verified_chunks,
+                        corrupt_chunks,
+                        worker
+                    ),
                 ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
             }
         });
@ -127,9 +223,20 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
|
|||||||
if let Err(err) = result {
|
if let Err(err) = result {
|
||||||
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
|
worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
|
||||||
error_count += 1;
|
error_count += 1;
|
||||||
|
verify_result = "failed";
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let verify_state = SnapshotVerifyState {
|
||||||
|
state: verify_result.to_string(),
|
||||||
|
upid: worker.upid().clone(),
|
||||||
|
};
|
||||||
|
manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
|
||||||
|
datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
|
||||||
|
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
||||||
|
|
||||||
|
|
||||||
Ok(error_count == 0)
|
Ok(error_count == 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -138,31 +245,32 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
|
|||||||
/// Errors are logged to the worker log.
|
/// Errors are logged to the worker log.
|
||||||
///
|
///
|
||||||
/// Returns
|
/// Returns
|
||||||
/// - Ok(true) if verify is successful
|
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||||
/// - Ok(false) if there were verification errors
|
|
||||||
/// - Err(_) if task was aborted
|
/// - Err(_) if task was aborted
|
||||||
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
|
pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<Vec<String>, Error> {
|
||||||
|
|
||||||
|
let mut errors = Vec::new();
|
||||||
let mut list = match group.list_backups(&datastore.base_path()) {
|
let mut list = match group.list_backups(&datastore.base_path()) {
|
||||||
Ok(list) => list,
|
Ok(list) => list,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
|
worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
|
||||||
return Ok(false);
|
return Ok(errors);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
worker.log(format!("verify group {}:{}", datastore.name(), group));
|
worker.log(format!("verify group {}:{}", datastore.name(), group));
|
||||||
|
|
||||||
let mut error_count = 0;
|
let mut verified_chunks = HashSet::with_capacity(1024*16); // start with 16384 chunks (up to 65GB)
|
||||||
|
let mut corrupt_chunks = HashSet::with_capacity(64); // start with 64 chunks since we assume there are few corrupt ones
|
||||||
|
|
||||||
BackupInfo::sort_list(&mut list, false); // newest first
|
BackupInfo::sort_list(&mut list, false); // newest first
|
||||||
for info in list {
|
for info in list {
|
||||||
if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
|
if !verify_backup_dir(datastore, &info.backup_dir, &mut verified_chunks, &mut corrupt_chunks, worker)?{
|
||||||
error_count += 1;
|
errors.push(info.backup_dir.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(error_count == 0)
|
Ok(errors)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verify all backups inside a datastore
|
/// Verify all backups inside a datastore
|
||||||
@ -170,27 +278,28 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
|
|||||||
/// Errors are logged to the worker log.
|
/// Errors are logged to the worker log.
|
||||||
///
|
///
|
||||||
/// Returns
|
/// Returns
|
||||||
/// - Ok(true) if verify is successful
|
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
||||||
/// - Ok(false) if there were verification errors
|
|
||||||
/// - Err(_) if task was aborted
|
/// - Err(_) if task was aborted
|
||||||
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
|
pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<Vec<String>, Error> {
|
||||||
|
|
||||||
let list = match BackupGroup::list_groups(&datastore.base_path()) {
|
let mut errors = Vec::new();
|
||||||
|
|
||||||
|
let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
|
||||||
Ok(list) => list,
|
Ok(list) => list,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
|
||||||
return Ok(false);
|
return Ok(errors);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
list.sort_unstable();
|
||||||
|
|
||||||
worker.log(format!("verify datastore {}", datastore.name()));
|
worker.log(format!("verify datastore {}", datastore.name()));
|
||||||
|
|
||||||
let mut error_count = 0;
|
|
||||||
for group in list {
|
for group in list {
|
||||||
if !verify_backup_group(datastore, &group, worker)? {
|
let mut group_errors = verify_backup_group(datastore, &group, worker)?;
|
||||||
error_count += 1;
|
errors.append(&mut group_errors);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(error_count == 0)
|
Ok(errors)
|
||||||
}
|
}
|
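
With verify_backup_group and verify_all_backups now returning the list of failed snapshots instead of a bare bool, a caller can report exactly which snapshots need attention. A hypothetical consumer (not part of this diff):

    // `errors` as returned by verify_all_backups(&datastore, &worker)?
    fn summarize(errors: Vec<String>) -> Result<(), anyhow::Error> {
        if errors.is_empty() {
            return Ok(());
        }
        for snapshot in &errors {
            eprintln!("verification failed: {}", snapshot);
        }
        anyhow::bail!("{} snapshot(s) failed verification", errors.len());
    }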

@@ -37,6 +37,7 @@ async fn run() -> Result<(), Error> {
     config::update_self_signed_cert(false)?;
 
     proxmox_backup::rrd::create_rrdb_dir()?;
+    proxmox_backup::config::jobstate::create_jobstate_dir()?;
 
     if let Err(err) = generate_auth_key() {
         bail!("unable to generate auth key - {}", err);
(File diff suppressed because it is too large.)
@@ -9,7 +9,7 @@ use proxmox_backup::tools;
 use proxmox_backup::config;
 use proxmox_backup::api2::{self, types::* };
 use proxmox_backup::client::*;
-use proxmox_backup::tools::ticket::*;
+use proxmox_backup::tools::ticket::Ticket;
 use proxmox_backup::auth_helpers::*;
 
 mod proxmox_backup_manager;
@@ -59,12 +59,13 @@ fn connect() -> Result<HttpClient, Error> {
         .verify_cert(false); // not required for connection to localhost
 
     let client = if uid.is_root() {
-        let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some("root@pam"), None)?;
+        let ticket = Ticket::new("PBS", Userid::root_userid())?
+            .sign(private_auth_key(), None)?;
         options = options.password(Some(ticket));
-        HttpClient::new("localhost", "root@pam", options)?
+        HttpClient::new("localhost", Userid::root_userid(), options)?
     } else {
        options = options.ticket_cache(true).interactive(true);
-        HttpClient::new("localhost", "root@pam", options)?
+        HttpClient::new("localhost", Userid::root_userid(), options)?
     };
 
     Ok(client)
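
The old assemble_rsa_ticket helper took key, prefix, and user in one call; the new Ticket type splits construction from signing. A minimal sketch of the pattern as used above (assuming the Ticket API of this tree; the second argument to sign() is optional additional authentication data):

    // Build an unsigned ticket for a user, then sign it with the node's
    // private auth key; the signed string serves as an HttpClient password.
    let ticket = Ticket::new("PBS", Userid::root_userid())?
        .sign(private_auth_key(), None)?;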

@@ -127,7 +128,7 @@ async fn garbage_collection_status(param: Value) -> Result<Value, Error> {
 
     let mut result = client.get(&path, None).await?;
     let mut data = result["data"].take();
-    let schema = api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;
+    let schema = &api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;
 
     let options = default_table_format_options();
 
@@ -193,7 +194,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
     let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
 
     let mut data = result["data"].take();
-    let schema = api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
+    let schema = &api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
 
     let options = default_table_format_options()
         .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))

@@ -1,5 +1,5 @@
 use std::sync::Arc;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -9,6 +9,7 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
 use proxmox::try_block;
 use proxmox::api::RpcEnvironmentType;
 
+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::configdir;
 use proxmox_backup::buildcfg;
 use proxmox_backup::server;
@@ -17,13 +18,21 @@ use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };
 
-fn main() {
+use proxmox_backup::api2::pull::do_sync_job;
+
+fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();
 
-    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
-        eprintln!("Error: {}", err);
-        std::process::exit(-1);
+    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+    let running_uid = nix::unistd::Uid::effective();
+    let running_gid = nix::unistd::Gid::effective();
+
+    if running_uid != backup_uid || running_gid != backup_gid {
+        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
     }
+
+    proxmox_backup::tools::runtime::main(run())
 }
 
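main() now refuses to start unless the process already runs as the backup user and group, instead of silently working with wrong file ownership. The same guard in isolation (a sketch; expected_uid/expected_gid stand in for the values looked up via backup_user()/backup_group() above):

    use anyhow::bail;

    fn assert_running_as(
        expected_uid: nix::unistd::Uid,
        expected_gid: nix::unistd::Gid,
    ) -> Result<(), anyhow::Error> {
        // effective IDs decide what files the daemon can create
        let uid = nix::unistd::Uid::effective();
        let gid = nix::unistd::Gid::effective();
        if uid != expected_uid || gid != expected_gid {
            bail!("not running as expected user/group (got uid {} gid {})", uid, gid);
        }
        Ok(())
    }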
 async fn run() -> Result<(), Error> {
@@ -40,11 +49,6 @@ async fn run() -> Result<(), Error> {
     let mut config = ApiConfig::new(
         buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;
 
-    // add default dirs which includes jquery and bootstrap
-    // my $base = '/usr/share/libpve-http-server-perl';
-    // add_dirs($self->{dirs}, '/css/' => "$base/css/");
-    // add_dirs($self->{dirs}, '/js/' => "$base/js/");
-    // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
     config.add_alias("novnc", "/usr/share/novnc-pve");
     config.add_alias("extjs", "/usr/share/javascript/extjs");
     config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
@@ -53,6 +57,11 @@ async fn run() -> Result<(), Error> {
     config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
     config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");
 
+    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
+    indexpath.push("index.hbs");
+    config.register_template("index", &indexpath)?;
+    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
+
     let rest_server = RestServer::new(config);
 
     //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@@ -313,7 +322,7 @@ async fn schedule_datastore_garbage_collection() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        "backup@pam",
+        Userid::backup_userid().clone(),
         false,
         move |worker| {
             worker.log(format!("starting garbage collection on store {}", store));
@@ -424,7 +433,7 @@ async fn schedule_datastore_prune() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        "backup@pam",
+        Userid::backup_userid().clone(),
         false,
         move |worker| {
             worker.log(format!("Starting datastore prune on store \"{}\"", store));
@@ -450,7 +459,7 @@ async fn schedule_datastore_prune() {
                 BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
 
             if !keep {
-                datastore.remove_backup_dir(&info.backup_dir)?;
+                datastore.remove_backup_dir(&info.backup_dir, true)?;
             }
         }
     }
@@ -466,10 +475,7 @@ async fn schedule_datastore_prune() {
 async fn schedule_datastore_sync_jobs() {
 
     use proxmox_backup::{
-        backup::DataStore,
-        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
-        server::{ WorkerTask },
-        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
+        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
         tools::systemd::time::{ parse_calendar_event, compute_next_event },
     };
 
@@ -481,14 +487,6 @@ async fn schedule_datastore_sync_jobs() {
         Ok((config, _digest)) => config,
     };
 
-    let remote_config = match remote::config() {
-        Err(err) => {
-            eprintln!("unable to read remote config - {}", err);
-            return;
-        }
-        Ok((config, _digest)) => config,
-    };
-
     for (job_id, (_, job_config)) in config.sections {
         let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
             Ok(c) => c,
@@ -513,16 +511,10 @@ async fn schedule_datastore_sync_jobs() {
 
         let worker_type = "syncjob";
 
-        let last = match lookup_last_worker(worker_type, &job_id) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            },
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &job_id) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                 continue;
             }
         };
@@ -544,57 +536,15 @@ async fn schedule_datastore_sync_jobs() {
         };
         if next > now { continue; }
 
-        let job_id2 = job_id.clone();
-
-        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
-            Ok(datastore) => datastore,
-            Err(err) => {
-                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
-                continue;
-            }
-        };
-
-        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
-            Ok(remote) => remote,
-            Err(err) => {
-                eprintln!("remote_config lookup failed: {}", err);
-                continue;
-            }
-        };
-
-        let username = String::from("backup@pam");
-        let delete = job_config.remove_vanished.unwrap_or(true);
-
-        if let Err(err) = WorkerTask::spawn(
-            worker_type,
-            Some(job_id.clone()),
-            &username.clone(),
-            false,
-            move |worker| async move {
-                worker.log(format!("Starting datastore sync job '{}'", job_id));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-                worker.log(format!("Sync datastore '{}' from '{}/{}'",
-                    job_config.store, job_config.remote, job_config.remote_store));
-
-                let options = HttpClientOptions::new()
-                    .password(Some(remote.password.clone()))
-                    .fingerprint(remote.fingerprint.clone());
-
-                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-                let _auth_info = client.login() // make sure we can auth
-                    .await
-                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
-
-                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);
-
-                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;
-
-                Ok(())
-            }
-        ) {
-            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
+        let job = match Job::new(worker_type, &job_id) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
+        let userid = Userid::backup_userid().clone();
+
+        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
         }
     }
 }
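
Scheduling is now decoupled from execution: the scheduler only asks jobstate for the last run time, computes the next due time from the calendar event, and hands a locked Job to do_sync_job. A condensed sketch of that decision, using the same helpers as the code above (event_str, last and now as in the surrounding function; error arms shortened):

    let event = match parse_calendar_event(&event_str) {
        Ok(event) => event,
        Err(err) => { eprintln!("bad schedule '{}': {}", event_str, err); return; }
    };
    // next due time strictly after the last recorded run
    let next = match compute_next_event(&event, last, false) {
        Ok(next) => next,
        Err(err) => { eprintln!("compute_next_event failed: {}", err); return; }
    };
    if next > now {
        return; // not due yet; Job::new() is never attempted, so no lock is taken
    }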

@@ -4,14 +4,24 @@ use std::sync::Arc;
 use anyhow::{Error};
 use serde_json::Value;
 use chrono::{TimeZone, Utc};
+use serde::Serialize;
 
 use proxmox::api::{ApiMethod, RpcEnvironment};
-use proxmox::api::api;
+use proxmox::api::{
+    api,
+    cli::{
+        OUTPUT_FORMAT,
+        ColumnConfig,
+        get_output_format,
+        format_and_print_result_full,
+        default_table_format_options,
+    },
+};
 
 use proxmox_backup::backup::{
     load_and_decrypt_key,
     CryptConfig,
+    KeyDerivationConfig,
 };
 
 use proxmox_backup::client::*;
@@ -19,11 +29,79 @@ use proxmox_backup::client::*;
 use crate::{
     KEYFILE_SCHEMA, REPO_URL_SCHEMA,
     extract_repository_from_value,
-    get_encryption_key_password,
     record_repository,
     connect,
 };
 
+#[api()]
+#[derive(Copy, Clone, Serialize)]
+/// Speed test result
+struct Speed {
+    /// The measured speed in Bytes/second
+    #[serde(skip_serializing_if="Option::is_none")]
+    speed: Option<f64>,
+    /// Top result we want to compare with
+    top: f64,
+}
+
+#[api(
+    properties: {
+        "tls": {
+            type: Speed,
+        },
+        "sha256": {
+            type: Speed,
+        },
+        "compress": {
+            type: Speed,
+        },
+        "decompress": {
+            type: Speed,
+        },
+        "aes256_gcm": {
+            type: Speed,
+        },
+    },
+)]
+#[derive(Copy, Clone, Serialize)]
+/// Benchmark Results
+struct BenchmarkResult {
+    /// TLS upload speed
+    tls: Speed,
+    /// SHA256 checksum computation speed
+    sha256: Speed,
+    /// ZStd level 1 compression speed
+    compress: Speed,
+    /// ZStd level 1 decompression speed
+    decompress: Speed,
+    /// AES256 GCM encryption speed
+    aes256_gcm: Speed,
+}
+
+static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
+    tls: Speed {
+        speed: None,
+        top: 1_000_000.0 * 590.0, // TLS to localhost, AMD Ryzen 7 2700X
+    },
+    sha256: Speed {
+        speed: None,
+        top: 1_000_000.0 * 2120.0, // AMD Ryzen 7 2700X
+    },
+    compress: Speed {
+        speed: None,
+        top: 1_000_000.0 * 2158.0, // AMD Ryzen 7 2700X
+    },
+    decompress: Speed {
+        speed: None,
+        top: 1_000_000.0 * 8062.0, // AMD Ryzen 7 2700X
+    },
+    aes256_gcm: Speed {
+        speed: None,
+        top: 1_000_000.0 * 3803.0, // AMD Ryzen 7 2700X
+    },
+};
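
Storing the 2020 reference numbers as a BenchmarkResult whose own speed fields start as None is what lets the table print "not tested" until a test actually ran, while top always carries the comparison baseline. A small sketch of the percentage formatting the renderer applies (same arithmetic as render_speed below):

    // speed and top in bytes/second
    fn as_percentage_line(speed: f64, top: f64) -> String {
        format!("{:.2} MB/s ({:.0}%)", speed / 1_000_000.0, speed * 100.0 / top)
    }

    // e.g. as_percentage_line(295_000_000.0, 590_000_000.0) == "295.00 MB/s (50%)"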

 #[api(
     input: {
         properties: {
@@ -31,10 +109,19 @@ use crate::{
                 schema: REPO_URL_SCHEMA,
                 optional: true,
             },
+            verbose: {
+                description: "Verbose output.",
+                type: bool,
+                optional: true,
+            },
             keyfile: {
                 schema: KEYFILE_SCHEMA,
                 optional: true,
             },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
         }
     }
 )]
@@ -45,38 +132,195 @@ pub async fn benchmark(
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
 
-    let repo = extract_repository_from_value(&param)?;
+    let repo = extract_repository_from_value(&param).ok();
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
 
+    let verbose = param["verbose"].as_bool().unwrap_or(false);
+
+    let output_format = get_output_format(&param);
+
     let crypt_config = match keyfile {
         None => None,
         Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
             let crypt_config = CryptConfig::new(key)?;
             Some(Arc::new(crypt_config))
         }
     };
 
+    let mut benchmark_result = BENCHMARK_RESULT_2020_TOP;
+
+    // do repo tests first, because this may prompt for a password
+    if let Some(repo) = repo {
+        test_upload_speed(&mut benchmark_result, repo, crypt_config.clone(), verbose).await?;
+    }
+
+    test_crypt_speed(&mut benchmark_result, verbose)?;
+
+    render_result(&output_format, &benchmark_result)?;
+
+    Ok(())
+}
+
+// print comparison table
+fn render_result(
+    output_format: &str,
+    benchmark_result: &BenchmarkResult,
+) -> Result<(), Error> {
+
+    let mut data = serde_json::to_value(benchmark_result)?;
+    let schema = &BenchmarkResult::API_SCHEMA;
+
+    let render_speed = |value: &Value, _record: &Value| -> Result<String, Error> {
+        match value["speed"].as_f64() {
+            None => Ok(String::from("not tested")),
+            Some(speed) => {
+                let top = value["top"].as_f64().unwrap();
+                Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
+            }
+        }
+    };
+
+    let options = default_table_format_options()
+        .column(ColumnConfig::new("tls")
+            .header("TLS (maximal backup upload speed)")
+            .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("sha256")
+            .header("SHA256 checksum computation speed")
+            .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("compress")
+            .header("ZStd level 1 compression speed")
+            .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("decompress")
+            .header("ZStd level 1 decompression speed")
+            .right_align(false).renderer(render_speed))
+        .column(ColumnConfig::new("aes256_gcm")
+            .header("AES256 GCM encryption speed")
+            .right_align(false).renderer(render_speed));
+
+    format_and_print_result_full(&mut data, schema, output_format, &options);
+
+    Ok(())
+}
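
Because BenchmarkResult derives Serialize and render_result goes through format_and_print_result_full, the same data drives both the human table and machine-readable output; with speed skipping None, an untested entry simply omits the field. A hedged sketch of that serialization behavior (Speed is private to this module, so this is illustrative only):

    use serde_json::json;

    // Speed { speed: None, .. } serializes without the "speed" key:
    let s = Speed { speed: None, top: 2_120_000_000.0 };
    assert_eq!(serde_json::to_value(&s)?, json!({ "top": 2_120_000_000.0 }));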

+async fn test_upload_speed(
+    benchmark_result: &mut BenchmarkResult,
+    repo: BackupRepository,
+    crypt_config: Option<Arc<CryptConfig>>,
+    verbose: bool,
+) -> Result<(), Error> {
 
     let backup_time = Utc.timestamp(Utc::now().timestamp(), 0);
 
     let client = connect(repo.host(), repo.user())?;
     record_repository(&repo);
 
+    if verbose { eprintln!("Connecting to backup server"); }
     let client = BackupWriter::start(
         client,
         crypt_config.clone(),
         repo.store(),
         "host",
-        "benshmark",
+        "benchmark",
         backup_time,
         false,
     ).await?;
 
-    println!("Start upload speed test");
-    let speed = client.upload_speedtest().await?;
+    if verbose { eprintln!("Start TLS speed test"); }
+    let speed = client.upload_speedtest(verbose).await?;
 
-    println!("Upload speed: {} MiB/s", speed);
+    eprintln!("TLS speed: {:.2} MB/s", speed/1_000_000.0);
+
+    benchmark_result.tls.speed = Some(speed);
+
+    Ok(())
+}
+
+// test hash/crypt/compress speed
+fn test_crypt_speed(
+    benchmark_result: &mut BenchmarkResult,
+    _verbose: bool,
+) -> Result<(), Error> {
+
+    let pw = b"test";
+
+    let kdf = KeyDerivationConfig::Scrypt {
+        n: 65536,
+        r: 8,
+        p: 1,
+        salt: Vec::new(),
+    };
+
+    let testkey = kdf.derive_key(pw)?;
+
+    let crypt_config = CryptConfig::new(testkey)?;
+
+    let random_data = proxmox::sys::linux::random_data(1024*1024)?;
+
+    let start_time = std::time::Instant::now();
+
+    let mut bytes = 0;
+    loop {
+        openssl::sha::sha256(&random_data);
+        bytes += random_data.len();
+        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.sha256.speed = Some(speed);
+
+    eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);
+
+    let start_time = std::time::Instant::now();
+
+    let mut bytes = 0;
+    loop {
+        let mut reader = &random_data[..];
+        zstd::stream::encode_all(&mut reader, 1)?;
+        bytes += random_data.len();
+        if start_time.elapsed().as_micros() > 3_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.compress.speed = Some(speed);
+
+    eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);
+
+    let start_time = std::time::Instant::now();
+
+    let compressed_data = {
+        let mut reader = &random_data[..];
+        zstd::stream::encode_all(&mut reader, 1)?
+    };
+
+    let mut bytes = 0;
+    loop {
+        let mut reader = &compressed_data[..];
+        let data = zstd::stream::decode_all(&mut reader)?;
+        bytes += data.len();
+        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.decompress.speed = Some(speed);
+
+    eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);
+
+    let start_time = std::time::Instant::now();
+
+    let mut bytes = 0;
+    loop {
+        let mut out = Vec::new();
+        crypt_config.encrypt_to(&random_data, &mut out)?;
+        bytes += random_data.len();
+        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+    }
+    let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
+    benchmark_result.aes256_gcm.speed = Some(speed);
+
+    eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);
+
     Ok(())
 }
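
Each of the loops above follows the same pattern: hammer one primitive on a fixed 1 MiB buffer for roughly a second of wall-clock time, then divide total bytes by elapsed seconds. A generic sketch of that pattern (a sketch, assuming an operation that consumes the whole buffer per call):

    use std::time::Instant;

    /// Returns throughput in bytes/second for `op` applied repeatedly to `buf`.
    fn measure_throughput(buf: &[u8], budget_us: u128, mut op: impl FnMut(&[u8])) -> f64 {
        let start = Instant::now();
        let mut bytes = 0usize;
        loop {
            op(buf);
            bytes += buf.len();
            // check after each full pass, so even a slow op terminates
            if start.elapsed().as_micros() > budget_us { break; }
        }
        bytes as f64 / start.elapsed().as_secs_f64()
    }

    // e.g. measure_throughput(&data, 1_000_000, |b| { openssl::sha::sha256(b); })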

src/bin/proxmox_backup_client/catalog.rs (new file, 267 lines)
@@ -0,0 +1,267 @@
use std::os::unix::fs::OpenOptionsExt;
use std::io::{Seek, SeekFrom};
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::api::{api, cli::*};

use proxmox_backup::tools;

use proxmox_backup::client::*;

use crate::{
    REPO_URL_SCHEMA,
    KEYFD_SCHEMA,
    extract_repository_from_value,
    record_repository,
    keyfile_parameters,
    key::get_encryption_key_password,
    decrypt_key,
    api_datastore_latest_snapshot,
    complete_repository,
    complete_backup_snapshot,
    complete_group_or_snapshot,
    complete_pxar_archive_name,
    connect,
    BackupDir,
    BackupGroup,
    BufferedDynamicReader,
    BufferedDynamicReadAt,
    CatalogReader,
    CATALOG_NAME,
    CryptConfig,
    DynamicIndexReader,
    IndexFile,
    Shell,
};

#[api(
    input: {
        properties: {
            repository: {
                schema: REPO_URL_SCHEMA,
                optional: true,
            },
            snapshot: {
                type: String,
                description: "Snapshot path.",
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
        }
    }
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {

    let repo = extract_repository_from_value(&param)?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot: BackupDir = path.parse()?;

    let (keydata, _) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
        None => None,
        Some(key) => {
            let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let client = connect(repo.host(), repo.user())?;

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &snapshot.group().backup_type(),
        &snapshot.group().backup_id(),
        snapshot.backup_time(),
        true,
    ).await?;

    let (manifest, _) = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;

    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;

    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

    let mut reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;

    let mut catalog_reader = CatalogReader::new(catalogfile);

    catalog_reader.dump()?;

    record_repository(&repo);

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            "snapshot": {
                type: String,
                description: "Group/Snapshot path.",
            },
            "archive-name": {
                type: String,
                description: "Backup archive name.",
            },
            "repository": {
                optional: true,
                schema: REPO_URL_SCHEMA,
            },
            "keyfile": {
                optional: true,
                type: String,
                description: "Path to encryption key.",
            },
            "keyfd": {
                schema: KEYFD_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
    let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
        let group: BackupGroup = path.parse()?;
        api_datastore_latest_snapshot(&client, repo.store(), group).await?
    } else {
        let snapshot: BackupDir = path.parse()?;
        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
    };

    let (keydata, _) = keyfile_parameters(&param)?;

    let crypt_config = match keydata {
        None => None,
        Some(key) => {
            let (key, _created) = decrypt_key(&key, &get_encryption_key_password)?;
            let crypt_config = CryptConfig::new(key)?;
            Some(Arc::new(crypt_config))
        }
    };

    let server_archive_name = if archive_name.ends_with(".pxar") {
        format!("{}.didx", archive_name)
    } else {
        bail!("Can only mount pxar archives.");
    };

    let client = BackupReader::start(
        client,
        crypt_config.clone(),
        repo.store(),
        &backup_type,
        &backup_id,
        backup_time,
        true,
    ).await?;

    let mut tmpfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    let (manifest, _) = client.download_manifest().await?;

    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&server_archive_name)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
    let reader = BufferedDynamicReader::new(index, chunk_reader);
    let archive_size = reader.archive_size();
    let reader: proxmox_backup::pxar::fuse::Reader =
        Arc::new(BufferedDynamicReadAt::new(reader));
    let decoder = proxmox_backup::pxar::fuse::Accessor::new(reader, archive_size).await?;

    client.download(CATALOG_NAME, &mut tmpfile).await?;
    let index = DynamicIndexReader::new(tmpfile)
        .map_err(|err| format_err!("unable to read catalog index - {}", err))?;

    // Note: do not use values stored in index (not trusted) - instead, compute them again
    let (csum, size) = index.compute_csum();
    manifest.verify_file(CATALOG_NAME, &csum, size)?;

    let most_used = index.find_most_used_chunks(8);

    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
    let mut reader = BufferedDynamicReader::new(index, chunk_reader);
    let mut catalogfile = std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")?;

    std::io::copy(&mut reader, &mut catalogfile)
        .map_err(|err| format_err!("unable to download catalog - {}", err))?;

    catalogfile.seek(SeekFrom::Start(0))?;
    let catalog_reader = CatalogReader::new(catalogfile);
    let state = Shell::new(
        catalog_reader,
        &server_archive_name,
        decoder,
    ).await?;

    println!("Starting interactive shell");
    state.shell().await?;

    record_repository(&repo);

    Ok(())
}

pub fn catalog_mgmt_cli() -> CliCommandMap {
    let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
        .arg_param(&["snapshot", "archive-name"])
        .completion_cb("repository", complete_repository)
        .completion_cb("archive-name", complete_pxar_archive_name)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
        .arg_param(&["snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_backup_snapshot);

    CliCommandMap::new()
        .insert("dump", catalog_dump_cmd_def)
        .insert("shell", catalog_shell_cmd_def)
}

src/bin/proxmox_backup_client/key.rs (new file, 284 lines)
@@ -0,0 +1,284 @@
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize};

use proxmox::api::api;
use proxmox::api::cli::{CliCommand, CliCommandMap};
use proxmox::sys::linux::tty;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};

use proxmox_backup::backup::{
    encrypt_key_with_passphrase, load_and_decrypt_key, store_key_config, KeyConfig,
};
use proxmox_backup::tools;

pub const DEFAULT_ENCRYPTION_KEY_FILE_NAME: &str = "encryption-key.json";
pub const MASTER_PUBKEY_FILE_NAME: &str = "master-public.pem";

pub fn find_master_pubkey() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

pub fn place_master_pubkey() -> Result<PathBuf, Error> {
    super::place_xdg_file(MASTER_PUBKEY_FILE_NAME, "main public key file")
}

pub fn find_default_encryption_key() -> Result<Option<PathBuf>, Error> {
    super::find_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

pub fn place_default_encryption_key() -> Result<PathBuf, Error> {
    super::place_xdg_file(DEFAULT_ENCRYPTION_KEY_FILE_NAME, "default encryption key file")
}

pub fn read_optional_default_encryption_key() -> Result<Option<Vec<u8>>, Error> {
    find_default_encryption_key()?
        .map(file_get_contents)
        .transpose()
}

pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
    // fixme: implement other input methods

    use std::env::VarError::*;
    match std::env::var("PBS_ENCRYPTION_PASSWORD") {
        Ok(p) => return Ok(p.as_bytes().to_vec()),
        Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
        Err(NotPresent) => {
            // Try another method
        }
    }

    // If we're on a TTY, query the user for a password
    if tty::stdin_isatty() {
        return Ok(tty::read_password("Encryption Key Password: ")?);
    }

    bail!("no password input mechanism available");
}

#[api(
    default: "scrypt",
)]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,

    /// Encrypt the key with a password using SCrypt.
    Scrypt,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description:
                    "Output file. Without this the key will become the new default encryption key.",
                optional: true,
            }
        },
    },
)]
/// Create a new encryption key.
fn create(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => {
            let path = place_default_encryption_key()?;
            println!("creating default key at: {:?}", path);
            path
        }
    };

    let kdf = kdf.unwrap_or_default();

    let key = proxmox::sys::linux::random_data(32)?;

    match kdf {
        Kdf::None => {
            let created = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                false,
                KeyConfig {
                    kdf: None,
                    created,
                    modified: created,
                    data: key,
                },
            )?;
        }
        Kdf::Scrypt => {
            // always read passphrase from tty
            if !tty::stdin_isatty() {
                bail!("unable to read passphrase - no tty");
            }

            let password = tty::read_and_verify_password("Encryption Key Password: ")?;

            let key_config = encrypt_key_with_passphrase(&key, &password)?;

            store_key_config(&path, false, key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            path: {
                description: "Key file. Without this the default key's password will be changed.",
                optional: true,
            }
        },
    },
)]
/// Change the encryption key's password.
fn change_passphrase(kdf: Option<Kdf>, path: Option<String>) -> Result<(), Error> {
    let path = match path {
        Some(path) => PathBuf::from(path),
        None => {
            let path = find_default_encryption_key()?
                .ok_or_else(|| {
                    format_err!("no encryption file provided and no default file found")
                })?;
            println!("updating default key at: {:?}", path);
            path
        }
    };

    let kdf = kdf.unwrap_or_default();

    if !tty::stdin_isatty() {
        bail!("unable to change passphrase - no tty");
    }

    let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;

    match kdf {
        Kdf::None => {
            let modified = Local.timestamp(Local::now().timestamp(), 0);

            store_key_config(
                &path,
                true,
                KeyConfig {
                    kdf: None,
                    created, // keep original value
                    modified,
                    data: key.to_vec(),
                },
            )?;
        }
        Kdf::Scrypt => {
            let password = tty::read_and_verify_password("New Password: ")?;

            let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
            new_key_config.created = created; // keep original value

            store_key_config(&path, true, new_key_config)?;
        }
    }

    Ok(())
}

#[api(
    input: {
        properties: {
            path: {
                description: "Path to the PEM formatted RSA public key.",
            },
        },
    },
)]
/// Import an RSA public key used to put an encrypted version of the symmetric backup encryption
/// key onto the backup server along with each backup.
fn import_master_pubkey(path: String) -> Result<(), Error> {
    let pem_data = file_get_contents(&path)?;

    if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
        bail!("Unable to decode PEM data - {}", err);
    }

    let target_path = place_master_pubkey()?;

    replace_file(&target_path, &pem_data, CreateOptions::new())?;

    println!("Imported public master key to {:?}", target_path);

    Ok(())
}

#[api]
/// Create an RSA public/private key pair used to put an encrypted version of the symmetric backup
/// encryption key onto the backup server along with each backup.
fn create_master_key() -> Result<(), Error> {
    // we need a TTY to query the new password
    if !tty::stdin_isatty() {
        bail!("unable to create master key - no tty");
    }

    let rsa = openssl::rsa::Rsa::generate(4096)?;
    let pkey = openssl::pkey::PKey::from_rsa(rsa)?;

    let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;

    let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
    let filename_pub = "master-public.pem";
    println!("Writing public master key to {}", filename_pub);
    replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;

    let cipher = openssl::symm::Cipher::aes_256_cbc();
    let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;

    let filename_priv = "master-private.pem";
    println!("Writing private master key to {}", filename_priv);
    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;

    Ok(())
}

pub fn cli() -> CliCommandMap {
    let key_create_cmd_def = CliCommand::new(&API_METHOD_CREATE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_CHANGE_PASSPHRASE)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_CREATE_MASTER_KEY);
    let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_IMPORT_MASTER_PUBKEY)
        .arg_param(&["path"])
        .completion_cb("path", tools::complete_file_name);

    CliCommandMap::new()
        .insert("create", key_create_cmd_def)
        .insert("create-master-key", key_create_master_key_cmd_def)
        .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
        .insert("change-passphrase", key_change_passphrase_cmd_def)
}
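
catalog.rs downloads the catalog into an anonymous temporary file: opening "/tmp" with O_TMPFILE yields a file handle with no directory entry, so it vanishes automatically when dropped and never needs cleanup. The pattern in isolation (Linux-only; needs the libc crate):

    use std::os::unix::fs::OpenOptionsExt;

    fn anon_tmpfile() -> std::io::Result<std::fs::File> {
        std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            // O_TMPFILE: create an unnamed inode inside the given directory
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")
    }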
@@ -1,6 +1,39 @@
+use anyhow::{Context, Error};
+
 mod benchmark;
 pub use benchmark::*;
 mod mount;
 pub use mount::*;
 mod task;
 pub use task::*;
+mod catalog;
+pub use catalog::*;
+
+pub mod key;
+
+pub fn base_directories() -> Result<xdg::BaseDirectories, Error> {
+    xdg::BaseDirectories::with_prefix("proxmox-backup").map_err(Error::from)
+}
+
+/// Convenience helper for better error messages:
+pub fn find_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<Option<std::path::PathBuf>, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .map(|base| base.find_config_file(file_name))
+        .with_context(|| format!("error searching for {}", description))
+}
+
+pub fn place_xdg_file(
+    file_name: impl AsRef<std::path::Path>,
+    description: &'static str,
+) -> Result<std::path::PathBuf, Error> {
+    let file_name = file_name.as_ref();
+    base_directories()
+        .and_then(|base| {
+            base.place_config_file(file_name).map_err(Error::from)
+        })
+        .with_context(|| format!("failed to place {} in xdg home", description))
+}
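
find_xdg_file and place_xdg_file wrap the xdg crate so every caller gets a uniform error message naming what it was looking for. A hypothetical caller, mirroring how key.rs wraps them:

    // Resolves to ~/.config/proxmox-backup/example.json (per XDG rules),
    // creating parent directories if needed.
    pub fn place_example_file() -> Result<std::path::PathBuf, anyhow::Error> {
        place_xdg_file("example.json", "example config file")
    }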
@@ -30,7 +30,6 @@ use proxmox_backup::client::*;
 use crate::{
     REPO_URL_SCHEMA,
     extract_repository_from_value,
-    get_encryption_key_password,
     complete_pxar_archive_name,
     complete_group_or_snapshot,
     complete_repository,
@@ -119,7 +118,7 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
     let crypt_config = match keyfile {
         None => None,
         Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+            let (key, _) = load_and_decrypt_key(&path, &crate::key::get_encryption_key_password)?;
             Some(Arc::new(CryptConfig::new(key)?))
         }
     };
@@ -140,12 +139,14 @@ async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
         true,
     ).await?;
 
-    let manifest = client.download_manifest().await?;
+    let (manifest, _) = client.download_manifest().await?;
+
+    let file_info = manifest.lookup_file_info(&archive_name)?;
 
     if server_archive_name.ends_with(".didx") {
         let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
         let most_used = index.find_most_used_chunks(8);
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
         let reader = BufferedDynamicReader::new(index, chunk_reader);
         let archive_size = reader.archive_size();
         let reader: proxmox_backup::pxar::fuse::Reader =
@@ -1,32 +1,18 @@
-use std::path::PathBuf;
-
 use anyhow::{bail, Error};

 use proxmox::api::{api, cli::*};

 use proxmox_backup::config;
-use proxmox_backup::configdir;
 use proxmox_backup::auth_helpers::*;
+use proxmox_backup::tools::cert::CertInfo;

-fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
-    let mut parts = Vec::new();
-    for entry in name.entries() {
-        parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
-    }
-    Ok(parts.join(", "))
-}
-
 #[api]
 /// Display node certificate information.
 fn cert_info() -> Result<(), Error> {

-    let cert_path = PathBuf::from(configdir!("/proxy.pem"));
-
-    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
-
-    let cert = openssl::x509::X509::from_pem(&cert_pem)?;
-
-    println!("Subject: {}", x509name_to_string(cert.subject_name())?);
-
+    let cert = CertInfo::new()?;
+
+    println!("Subject: {}", cert.subject_name()?);
+
     if let Some(san) = cert.subject_alt_names() {
         for name in san.iter() {
@@ -42,17 +28,12 @@ fn cert_info() -> Result<(), Error> {
         }
     }

-    println!("Issuer: {}", x509name_to_string(cert.issuer_name())?);
+    println!("Issuer: {}", cert.issuer_name()?);
     println!("Validity:");
     println!(" Not Before: {}", cert.not_before());
     println!(" Not After : {}", cert.not_after());

-    let fp = cert.digest(openssl::hash::MessageDigest::sha256())?;
-    let fp_string = proxmox::tools::digest_to_hex(&fp);
-    let fp_string = fp_string.as_bytes().chunks(2).map(|v| std::str::from_utf8(v).unwrap())
-        .collect::<Vec<&str>>().join(":");
-
-    println!("Fingerprint (sha256): {}", fp_string);
-
+    println!("Fingerprint (sha256): {}", cert.fingerprint()?);
+
     let pubkey = cert.public_key()?;
     println!("Public key type: {}", openssl::nid::Nid::from_raw(pubkey.id().as_raw()).long_name()?);
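The hunk above replaces the hand-rolled openssl calls with a CertInfo helper from proxmox_backup::tools::cert. The idea behind such a wrapper, sketched with the same openssl calls the removed code used (the method set of this stand-in is illustrative, not the real CertInfo API):

    use anyhow::Error;
    use openssl::x509::{X509, X509NameRef};

    // Stand-in wrapper: bundle certificate parsing and field formatting in
    // one place so every caller prints subject/issuer the same way.
    struct CertInfo {
        cert: X509,
    }

    impl CertInfo {
        fn from_pem(pem: &[u8]) -> Result<Self, Error> {
            Ok(Self { cert: X509::from_pem(pem)? })
        }

        fn subject_name(&self) -> Result<String, Error> {
            name_to_string(self.cert.subject_name())
        }
    }

    // same logic as the removed x509name_to_string() above
    fn name_to_string(name: &X509NameRef) -> Result<String, Error> {
        let mut parts = Vec::new();
        for entry in name.entries() {
            parts.push(format!("{} = {}", entry.object().nid().short_name()?, entry.data().as_utf8()?));
        }
        Ok(parts.join(", "))
    }

    fn main() -> Result<(), Error> {
        // usage would look like:
        // let info = CertInfo::from_pem(&std::fs::read("cert.pem")?)?;
        // println!("Subject: {}", info.subject_name()?);
        Ok(())
    }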
@@ -239,7 +239,7 @@ pub fn zpool_commands() -> CommandLineInterface {
         .insert("create",
                 CliCommand::new(&API_METHOD_CREATE_ZPOOL)
                 .arg_param(&["name"])
-                .completion_cb("devices", complete_disk_name) // fixme: comlete the list
+                .completion_cb("devices", complete_disk_name) // fixme: complete the list
         );

     cmd_def.into()
@@ -3,8 +3,10 @@ use std::ffi::OsStr;
 use std::fs::OpenOptions;
 use std::os::unix::fs::OpenOptionsExt;
 use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};

-use anyhow::{format_err, Error};
+use anyhow::{bail, format_err, Error};
 use futures::future::FutureExt;
 use futures::select;
 use tokio::signal::unix::{signal, SignalKind};
@@ -24,11 +26,14 @@ fn extract_archive_from_reader<R: std::io::Read>(
     allow_existing_dirs: bool,
     verbose: bool,
     match_list: &[MatchEntry],
+    extract_match_default: bool,
+    on_error: Option<Box<dyn FnMut(Error) -> Result<(), Error> + Send>>,
 ) -> Result<(), Error> {
     proxmox_backup::pxar::extract_archive(
         pxar::decoder::Decoder::from_std(reader)?,
         Path::new(target),
         &match_list,
+        extract_match_default,
         feature_flags,
         allow_existing_dirs,
         |path| {
@@ -36,6 +41,7 @@ fn extract_archive_from_reader<R: std::io::Read>(
                 println!("{:?}", path);
             }
         },
+        on_error,
     )
 }

@@ -102,6 +108,11 @@ fn extract_archive_from_reader<R: std::io::Read>(
                 optional: true,
                 default: false,
             },
+            strict: {
+                description: "Stop on errors. Otherwise most errors will simply warn.",
+                optional: true,
+                default: false,
+            },
         },
     },
 )]
@@ -119,6 +130,7 @@ fn extract_archive(
     no_device_nodes: bool,
     no_fifos: bool,
     no_sockets: bool,
+    strict: bool,
 ) -> Result<(), Error> {
     let mut feature_flags = Flags::DEFAULT;
     if no_xattrs {
@@ -162,6 +174,22 @@ fn extract_archive(
         );
     }

+    let extract_match_default = match_list.is_empty();
+
+    let was_ok = Arc::new(AtomicBool::new(true));
+    let on_error = if strict {
+        // by default errors are propagated up
+        None
+    } else {
+        let was_ok = Arc::clone(&was_ok);
+        // otherwise we want to log them but not act on them
+        Some(Box::new(move |err| {
+            was_ok.store(false, Ordering::Release);
+            eprintln!("error: {}", err);
+            Ok(())
+        }) as Box<dyn FnMut(Error) -> Result<(), Error> + Send>)
+    };
+
     if archive == "-" {
         let stdin = std::io::stdin();
         let mut reader = stdin.lock();
@@ -172,6 +200,8 @@ fn extract_archive(
             allow_existing_dirs,
             verbose,
             &match_list,
+            extract_match_default,
+            on_error,
         )?;
     } else {
         if verbose {
@@ -186,9 +216,15 @@ fn extract_archive(
             allow_existing_dirs,
             verbose,
             &match_list,
+            extract_match_default,
+            on_error,
         )?;
     }

+    if !was_ok.load(Ordering::Acquire) {
+        bail!("there were errors");
+    }
+
     Ok(())
 }
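The strict flag above is implemented with a shared AtomicBool plus an optional error callback: in non-strict mode every error is logged and swallowed, and the flag turns the whole run into a failure at the end. A compilable sketch of just that pattern (names simplified, String used in place of anyhow::Error):

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};

    fn run(strict: bool, inputs: &[&str]) -> Result<(), String> {
        let was_ok = Arc::new(AtomicBool::new(true));

        let mut on_error: Box<dyn FnMut(String) -> Result<(), String>> = if strict {
            // strict: propagate the first error immediately
            Box::new(|err| Err(err))
        } else {
            let was_ok = Arc::clone(&was_ok);
            // non-strict: remember that something failed, log it, keep going
            Box::new(move |err| {
                was_ok.store(false, Ordering::Release);
                eprintln!("error: {}", err);
                Ok(())
            })
        };

        for input in inputs {
            if input.is_empty() {
                on_error("empty input".to_string())?;
            }
        }

        if !was_ok.load(Ordering::Acquire) {
            return Err("there were errors".to_string());
        }
        Ok(())
    }

    fn main() {
        assert!(run(false, &["a", ""]).is_err()); // swallowed error still fails the run
        assert!(run(false, &["a", "b"]).is_ok());
    }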
@@ -123,18 +123,19 @@ impl BackupReader {
     }

     /// Download backup manifest (index.json)
-    pub async fn download_manifest(&self) -> Result<BackupManifest, Error> {
-
-        use std::convert::TryFrom;
+    ///
+    /// The manifest signature is verified if we have a crypt_config.
+    pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {

         let mut raw_data = Vec::with_capacity(64 * 1024);
         self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
-        let blob = DataBlob::from_raw(raw_data)?;
-        blob.verify_crc()?;
-        let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-        let json: Value = serde_json::from_slice(&data[..])?;
-
-        BackupManifest::try_from(json)
+        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
+        // no expected digest available
+        let data = blob.decode(None, None)?;
+
+        let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
+
+        Ok((manifest, data))
     }

     /// Download a .blob file
@@ -1,3 +1,4 @@
+use std::convert::TryFrom;
 use std::fmt;

 use anyhow::{format_err, Error};
@@ -15,7 +16,7 @@ pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_RE
 #[derive(Debug)]
 pub struct BackupRepository {
     /// The user name used for Authentication
-    user: Option<String>,
+    user: Option<Userid>,
     /// The host name or IP address
     host: Option<String>,
     /// The name of the datastore
@@ -24,15 +25,15 @@ pub struct BackupRepository {

 impl BackupRepository {

-    pub fn new(user: Option<String>, host: Option<String>, store: String) -> Self {
+    pub fn new(user: Option<Userid>, host: Option<String>, store: String) -> Self {
         Self { user, host, store }
     }

-    pub fn user(&self) -> &str {
+    pub fn user(&self) -> &Userid {
         if let Some(ref user) = self.user {
-            return user;
+            return &user;
         }
-        "root@pam"
+        Userid::root_userid()
     }

     pub fn host(&self) -> &str {
@@ -73,7 +74,7 @@ impl std::str::FromStr for BackupRepository {
             .ok_or_else(|| format_err!("unable to parse repository url '{}'", url))?;

         Ok(Self {
-            user: cap.get(1).map(|m| m.as_str().to_owned()),
+            user: cap.get(1).map(|m| Userid::try_from(m.as_str().to_owned())).transpose()?,
             host: cap.get(2).map(|m| m.as_str().to_owned()),
             store: cap[3].to_owned(),
         })
@@ -3,7 +3,7 @@ use std::os::unix::fs::OpenOptionsExt;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

-use anyhow::{format_err, Error};
+use anyhow::{bail, format_err, Error};
 use chrono::{DateTime, Utc};
 use futures::*;
 use futures::stream::Stream;
@@ -16,6 +16,7 @@ use proxmox::tools::digest_to_hex;

 use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
 use crate::backup::*;
+use crate::tools::format::HumanByte;

 use super::{HttpClient, H2Client};

@@ -163,21 +164,12 @@ impl BackupWriter {
         data: Vec<u8>,
         file_name: &str,
         compress: bool,
-        crypt_or_sign: Option<bool>,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {
-
-        let blob = if let Some(ref crypt_config) = self.crypt_config {
-            if let Some(encrypt) = crypt_or_sign {
-                if encrypt {
-                    DataBlob::encode(&data, Some(crypt_config), compress)?
-                } else {
-                    DataBlob::create_signed(&data, crypt_config, compress)?
-                }
-            } else {
-                DataBlob::encode(&data, None, compress)?
-            }
-        } else {
-            DataBlob::encode(&data, None, compress)?
+        let blob = match (encrypt, &self.crypt_config) {
+            (false, _) => DataBlob::encode(&data, None, compress)?,
+            (true, None) => bail!("requested encryption without a crypt config"),
+            (true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), compress)?,
         };

         let raw_data = blob.into_inner();
@@ -194,8 +186,8 @@ impl BackupWriter {
         src_path: P,
         file_name: &str,
         compress: bool,
-        crypt_or_sign: Option<bool>,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {

         let src_path = src_path.as_ref();

@@ -209,7 +201,7 @@ impl BackupWriter {
             .await
             .map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;

-        self.upload_blob_from_data(contents, file_name, compress, crypt_or_sign).await
+        self.upload_blob_from_data(contents, file_name, compress, encrypt).await
     }

     pub async fn upload_stream(
@@ -219,6 +211,8 @@ impl BackupWriter {
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         prefix: &str,
         fixed_size: Option<u64>,
+        compress: bool,
+        encrypt: bool,
     ) -> Result<BackupStats, Error> {
         let known_chunks = Arc::new(Mutex::new(HashSet::new()));

@@ -227,6 +221,10 @@ impl BackupWriter {
             param["size"] = size.into();
         }

+        if encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
         let index_path = format!("{}_index", prefix);
         let close_path = format!("{}_close", prefix);

@@ -245,22 +243,43 @@ impl BackupWriter {

         let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

-        let (chunk_count, size, duration, speed, csum) =
+        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
             Self::upload_chunk_info_stream(
                 self.h2.clone(),
                 wid,
                 stream,
                 &prefix,
                 known_chunks.clone(),
-                self.crypt_config.clone(),
+                if encrypt { self.crypt_config.clone() } else { None },
+                compress,
                 self.verbose,
             )
             .await?;

-        println!("{}: Uploaded {} bytes as {} chunks in {} seconds ({} MB/s).", archive_name, size, chunk_count, duration.as_secs(), speed);
-        if chunk_count > 0 {
-            println!("{}: Average chunk size was {} bytes.", archive_name, size/chunk_count);
-            println!("{}: Time per request: {} microseconds.", archive_name, (duration.as_micros())/(chunk_count as u128));
+        let uploaded = size - size_reused;
+        let vsize_h: HumanByte = size.into();
+        let archive = if self.verbose {
+            archive_name.to_string()
+        } else {
+            crate::tools::format::strip_server_file_expenstion(archive_name.clone())
+        };
+        if archive_name != CATALOG_NAME {
+            let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
+            let uploaded: HumanByte = uploaded.into();
+            println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
+        } else {
+            println!("Uploaded backup catalog ({})", vsize_h);
+        }
+
+        if size_reused > 0 && size > 1024*1024 {
+            let reused_percent = size_reused as f64 * 100. / size as f64;
+            let reused: HumanByte = size_reused.into();
+            println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
+        }
+        if self.verbose && chunk_count > 0 {
+            println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
+            println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
+            println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
         }

         let param = json!({
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn response_queue() -> (
|
fn response_queue(verbose: bool) -> (
|
||||||
mpsc::Sender<h2::client::ResponseFuture>,
|
mpsc::Sender<h2::client::ResponseFuture>,
|
||||||
oneshot::Receiver<Result<(), Error>>
|
oneshot::Receiver<Result<(), Error>>
|
||||||
) {
|
) {
|
||||||
@ -300,11 +319,11 @@ impl BackupWriter {
|
|||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
verify_queue_rx
|
verify_queue_rx
|
||||||
.map(Ok::<_, Error>)
|
.map(Ok::<_, Error>)
|
||||||
.try_for_each(|response: h2::client::ResponseFuture| {
|
.try_for_each(move |response: h2::client::ResponseFuture| {
|
||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
.map_ok(|result| println!("RESPONSE: {:?}", result))
|
.map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
|
||||||
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
||||||
})
|
})
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
@ -455,18 +474,16 @@ impl BackupWriter {
|
|||||||
/// Download backup manifest (index.json) of last backup
|
/// Download backup manifest (index.json) of last backup
|
||||||
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
||||||
|
|
||||||
use std::convert::TryFrom;
|
|
||||||
|
|
||||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||||
|
|
||||||
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
||||||
self.h2.download("previous", Some(param), &mut raw_data).await?;
|
self.h2.download("previous", Some(param), &mut raw_data).await?;
|
||||||
|
|
||||||
let blob = DataBlob::from_raw(raw_data)?;
|
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
|
||||||
blob.verify_crc()?;
|
// no expected digest available
|
||||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
|
||||||
let json: Value = serde_json::from_slice(&data[..])?;
|
|
||||||
let manifest = BackupManifest::try_from(json)?;
|
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||||
|
|
||||||
Ok(manifest)
|
Ok(manifest)
|
||||||
}
|
}
|
||||||
@ -478,14 +495,19 @@ impl BackupWriter {
|
|||||||
prefix: &str,
|
prefix: &str,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
||||||
crypt_config: Option<Arc<CryptConfig>>,
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
|
compress: bool,
|
||||||
verbose: bool,
|
verbose: bool,
|
||||||
) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {
|
) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {
|
||||||
|
|
||||||
let repeat = Arc::new(AtomicUsize::new(0));
|
let total_chunks = Arc::new(AtomicUsize::new(0));
|
||||||
let repeat2 = repeat.clone();
|
let total_chunks2 = total_chunks.clone();
|
||||||
|
let known_chunk_count = Arc::new(AtomicUsize::new(0));
|
||||||
|
let known_chunk_count2 = known_chunk_count.clone();
|
||||||
|
|
||||||
let stream_len = Arc::new(AtomicUsize::new(0));
|
let stream_len = Arc::new(AtomicUsize::new(0));
|
||||||
let stream_len2 = stream_len.clone();
|
let stream_len2 = stream_len.clone();
|
||||||
|
let reused_len = Arc::new(AtomicUsize::new(0));
|
||||||
|
let reused_len2 = reused_len.clone();
|
||||||
|
|
||||||
let append_chunk_path = format!("{}_index", prefix);
|
let append_chunk_path = format!("{}_index", prefix);
|
||||||
let upload_chunk_path = format!("{}_chunk", prefix);
|
let upload_chunk_path = format!("{}_chunk", prefix);
|
||||||
@ -504,11 +526,11 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let chunk_len = data.len();
|
let chunk_len = data.len();
|
||||||
|
|
||||||
repeat.fetch_add(1, Ordering::SeqCst);
|
total_chunks.fetch_add(1, Ordering::SeqCst);
|
||||||
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
||||||
|
|
||||||
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
||||||
.compress(true);
|
.compress(compress);
|
||||||
|
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(ref crypt_config) = crypt_config {
|
||||||
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
||||||
@ -527,6 +549,8 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let chunk_is_known = known_chunks.contains(digest);
|
let chunk_is_known = known_chunks.contains(digest);
|
||||||
if chunk_is_known {
|
if chunk_is_known {
|
||||||
|
known_chunk_count.fetch_add(1, Ordering::SeqCst);
|
||||||
|
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
|
||||||
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
||||||
} else {
|
} else {
|
||||||
known_chunks.insert(*digest);
|
known_chunks.insert(*digest);
|
||||||
@ -549,7 +573,7 @@ impl BackupWriter {
|
|||||||
let digest = chunk_info.digest;
|
let digest = chunk_info.digest;
|
||||||
let digest_str = digest_to_hex(&digest);
|
let digest_str = digest_to_hex(&digest);
|
||||||
|
|
||||||
if verbose {
|
if false && verbose { // TO verbose, needs finer verbosity setting granularity
|
||||||
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
|
println!("upload new chunk {} ({} bytes, offset {})", digest_str,
|
||||||
chunk_info.chunk_len, offset);
|
chunk_info.chunk_len, offset);
|
||||||
}
|
}
|
||||||
@ -592,18 +616,21 @@ impl BackupWriter {
|
|||||||
upload_result.await?.and(result)
|
upload_result.await?.and(result)
|
||||||
}.boxed())
|
}.boxed())
|
||||||
.and_then(move |_| {
|
.and_then(move |_| {
|
||||||
let repeat = repeat2.load(Ordering::SeqCst);
|
let duration = start_time.elapsed();
|
||||||
|
let total_chunks = total_chunks2.load(Ordering::SeqCst);
|
||||||
|
let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
|
||||||
let stream_len = stream_len2.load(Ordering::SeqCst);
|
let stream_len = stream_len2.load(Ordering::SeqCst);
|
||||||
let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
|
let reused_len = reused_len2.load(Ordering::SeqCst);
|
||||||
|
|
||||||
let mut guard = index_csum_2.lock().unwrap();
|
let mut guard = index_csum_2.lock().unwrap();
|
||||||
let csum = guard.take().unwrap().finish();
|
let csum = guard.take().unwrap().finish();
|
||||||
|
|
||||||
futures::future::ok((repeat, stream_len, start_time.elapsed(), speed, csum))
|
futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upload_speedtest(&self) -> Result<usize, Error> {
|
/// Upload speed test - prints result to stderr
|
||||||
|
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
|
||||||
|
|
||||||
let mut data = vec![];
|
let mut data = vec![];
|
||||||
// generate pseudo random byte sequence
|
// generate pseudo random byte sequence
|
||||||
@ -618,7 +645,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let mut repeat = 0;
|
let mut repeat = 0;
|
||||||
|
|
||||||
let (upload_queue, upload_result) = Self::response_queue();
|
let (upload_queue, upload_result) = Self::response_queue(verbose);
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
let start_time = std::time::Instant::now();
|
||||||
|
|
||||||
@ -630,7 +657,7 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let mut upload_queue = upload_queue.clone();
|
let mut upload_queue = upload_queue.clone();
|
||||||
|
|
||||||
println!("send test data ({} bytes)", data.len());
|
if verbose { eprintln!("send test data ({} bytes)", data.len()); }
|
||||||
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
||||||
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
||||||
|
|
||||||
@ -641,9 +668,9 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let _ = upload_result.await?;
|
let _ = upload_result.await?;
|
||||||
|
|
||||||
println!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
|
eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
|
||||||
let speed = ((item_len*1_000_000*(repeat as usize))/(1024*1024))/(start_time.elapsed().as_micros() as usize);
|
let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
|
||||||
println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
||||||
|
|
||||||
Ok(speed)
|
Ok(speed)
|
||||||
}
|
}
|
||||||
|
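The reuse statistics above come from shared AtomicUsize counters: clones of the Arcs are moved into the stream closures, and the originals are read back once the upload pipeline finishes. The same plumbing on a plain iterator (chunk sizes and the known flag are made-up sample data):

    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let total_chunks = Arc::new(AtomicUsize::new(0));
        let reused_len = Arc::new(AtomicUsize::new(0));

        // (chunk_len, chunk_is_known) pairs standing in for the upload stream
        let chunks: Vec<(usize, bool)> = vec![(4096, false), (4096, true), (8192, true)];

        {
            let total_chunks = Arc::clone(&total_chunks);
            let reused_len = Arc::clone(&reused_len);
            chunks.iter().for_each(move |(len, known)| {
                total_chunks.fetch_add(1, Ordering::SeqCst);
                if *known {
                    reused_len.fetch_add(*len, Ordering::SeqCst);
                }
            });
        }

        // after the "stream" completed, the originals hold the totals
        assert_eq!(total_chunks.load(Ordering::SeqCst), 3);
        assert_eq!(reused_len.load(Ordering::SeqCst), 4096 + 8192);
    }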
@@ -16,6 +16,7 @@ use percent_encoding::percent_encode;
 use xdg::BaseDirectories;

 use proxmox::{
+    api::error::HttpError,
     sys::linux::tty,
     tools::{
         fs::{file_get_json, replace_file, CreateOptions},
@@ -23,6 +24,7 @@ use proxmox::{
 };

 use super::pipe_to_stream::PipeToSendStream;
+use crate::api2::types::Userid;
 use crate::tools::async_io::EitherStream;
 use crate::tools::{self, BroadcastFuture, DEFAULT_ENCODE_SET};

@@ -103,7 +105,7 @@ pub struct HttpClient {
 }

 /// Delete stored ticket data (logout)
-pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<(), Error> {
+pub fn delete_ticket_info(prefix: &str, server: &str, username: &Userid) -> Result<(), Error> {

     let base = BaseDirectories::with_prefix(prefix)?;

@@ -115,7 +117,7 @@ pub fn delete_ticket_info(prefix: &str, server: &str, username: &str) -> Result<
     let mut data = file_get_json(&path, Some(json!({})))?;

     if let Some(map) = data[server].as_object_mut() {
-        map.remove(username);
+        map.remove(username.as_str());
     }

     replace_file(path, data.to_string().as_bytes(), CreateOptions::new().perm(mode))?;
@@ -222,7 +224,7 @@ fn store_ticket_info(prefix: &str, server: &str, username: &str, ticket: &str, t
     Ok(())
 }

-fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(String, String)> {
+fn load_ticket_info(prefix: &str, server: &str, userid: &Userid) -> Option<(String, String)> {
     let base = BaseDirectories::with_prefix(prefix).ok()?;

     // usually /run/user/<uid>/...
@@ -230,7 +232,7 @@ fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(Strin
     let data = file_get_json(&path, None).ok()?;
     let now = Utc::now().timestamp();
     let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60;
-    let uinfo = data[server][username].as_object()?;
+    let uinfo = data[server][userid.as_str()].as_object()?;
     let timestamp = uinfo["timestamp"].as_i64()?;
     let age = now - timestamp;

@@ -244,8 +246,11 @@ fn load_ticket_info(prefix: &str, server: &str, username: &str) -> Option<(Strin
 }

 impl HttpClient {
-    pub fn new(server: &str, username: &str, mut options: HttpClientOptions) -> Result<Self, Error> {
+    pub fn new(
+        server: &str,
+        userid: &Userid,
+        mut options: HttpClientOptions,
+    ) -> Result<Self, Error> {

         let verified_fingerprint = Arc::new(Mutex::new(None));

@@ -305,20 +310,20 @@ impl HttpClient {
         } else {
             let mut ticket_info = None;
             if use_ticket_cache {
-                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, username);
+                ticket_info = load_ticket_info(options.prefix.as_ref().unwrap(), server, userid);
             }
             if let Some((ticket, _token)) = ticket_info {
                 ticket
             } else {
-                Self::get_password(&username, options.interactive)?
+                Self::get_password(userid, options.interactive)?
             }
         };

         let login_future = Self::credentials(
             client.clone(),
             server.to_owned(),
-            username.to_owned(),
-            password,
+            userid.to_owned(),
+            password.to_owned(),
         ).map_ok({
             let server = server.to_string();
             let prefix = options.prefix.clone();
@@ -354,7 +359,7 @@ impl HttpClient {
         (*self.fingerprint.lock().unwrap()).clone()
     }

-    fn get_password(username: &str, interactive: bool) -> Result<String, Error> {
+    fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
         // If we're on a TTY, query the user for a password
         if interactive && tty::stdin_isatty() {
             let msg = format!("Password for \"{}\": ", username);
@@ -578,7 +583,7 @@ impl HttpClient {
     async fn credentials(
         client: Client<HttpsConnector>,
         server: String,
-        username: String,
+        username: Userid,
         password: String,
     ) -> Result<AuthInfo, Error> {
         let data = json!({ "username": username, "password": password });
@@ -606,7 +611,7 @@ impl HttpClient {
                 Ok(value)
             }
         } else {
-            bail!("HTTP Error {}: {}", status, text);
+            Err(Error::from(HttpError::new(status, text)))
         }
     }

@@ -819,7 +824,7 @@ impl H2Client {
                 bail!("got result without data property");
             }
         } else {
-            bail!("HTTP Error {}: {}", status, text);
+            Err(Error::from(HttpError::new(status, text)))
         }
     }
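Most of the hunks above are a mechanical &str -> &Userid migration. It type-checks at the existing call sites because the user id type still exposes as_str() for JSON keys and implements Display for prompts; a simplified stand-in (not the real api2::types::Userid) showing why:

    use std::fmt;

    #[derive(Clone, PartialEq, Eq)]
    struct Userid(String);

    impl Userid {
        fn as_str(&self) -> &str {
            &self.0
        }
    }

    impl fmt::Display for Userid {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.write_str(&self.0)
        }
    }

    fn main() {
        let user = Userid("root@pam".to_string());
        // map.remove(username.as_str()) keeps working:
        assert_eq!(user.as_str(), "root@pam");
        // format!(... "{}", username) keeps working:
        println!("Password for \"{}\": ", user);
    }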
@@ -6,8 +6,8 @@ use std::convert::TryFrom;
 use std::sync::Arc;
 use std::collections::HashMap;
 use std::io::{Seek, SeekFrom};
-use chrono::{Utc, TimeZone};

+use proxmox::api::error::{StatusCode, HttpError};
 use crate::server::{WorkerTask};
 use crate::backup::*;
 use crate::api2::types::*;
@@ -27,16 +27,18 @@ async fn pull_index_chunks<I: IndexFile>(


     for pos in 0..index.index_count() {
-        let digest = index.index_digest(pos).unwrap();
-        let chunk_exists = target.cond_touch_chunk(digest, false)?;
+        let info = index.chunk_info(pos).unwrap();
+        let chunk_exists = target.cond_touch_chunk(&info.digest, false)?;
         if chunk_exists {
             //worker.log(format!("chunk {} exists {}", pos, proxmox::tools::digest_to_hex(digest)));
             continue;
         }
         //worker.log(format!("sync {} chunk {}", pos, proxmox::tools::digest_to_hex(digest)));
-        let chunk = chunk_reader.read_raw_chunk(&digest).await?;
+        let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;

-        target.insert_chunk(&chunk, &digest)?;
+        chunk.verify_unencrypted(info.size() as usize, &info.digest)?;
+
+        target.insert_chunk(&chunk, &info.digest)?;
     }

     Ok(())
@@ -60,15 +62,32 @@ async fn download_manifest(
     Ok(tmp_manifest_file)
 }

+fn verify_archive(
+    info: &FileInfo,
+    csum: &[u8; 32],
+    size: u64,
+) -> Result<(), Error> {
+    if size != info.size {
+        bail!("wrong size for file '{}' ({} != {})", info.filename, info.size, size);
+    }
+
+    if csum != &info.csum {
+        bail!("wrong checksum for file '{}'", info.filename);
+    }
+
+    Ok(())
+}
+
 async fn pull_single_archive(
     worker: &WorkerTask,
     reader: &BackupReader,
     chunk_reader: &mut RemoteChunkReader,
     tgt_store: Arc<DataStore>,
     snapshot: &BackupDir,
-    archive_name: &str,
+    archive_info: &FileInfo,
 ) -> Result<(), Error> {

+    let archive_name = &archive_info.filename;
     let mut path = tgt_store.base_path();
     path.push(snapshot.relative_path());
     path.push(archive_name);
@@ -89,16 +108,23 @@ async fn pull_single_archive(
         ArchiveType::DynamicIndex => {
             let index = DynamicIndexReader::new(tmpfile)
                 .map_err(|err| format_err!("unable to read dynamic index {:?} - {}", tmp_path, err))?;
+            let (csum, size) = index.compute_csum();
+            verify_archive(archive_info, &csum, size)?;

             pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
         }
         ArchiveType::FixedIndex => {
             let index = FixedIndexReader::new(tmpfile)
                 .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err))?;
+            let (csum, size) = index.compute_csum();
+            verify_archive(archive_info, &csum, size)?;

             pull_index_chunks(worker, chunk_reader, tgt_store.clone(), index).await?;
         }
-        ArchiveType::Blob => { /* nothing to do */ }
+        ArchiveType::Blob => {
+            let (csum, size) = compute_file_csum(&mut tmpfile)?;
+            verify_archive(archive_info, &csum, size)?;
+        }
     }
     if let Err(err) = std::fs::rename(&tmp_path, &path) {
         bail!("Atomic rename file {:?} failed - {}", path, err);
@@ -152,17 +178,36 @@ async fn pull_snapshot(
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");

-    let mut tmp_manifest_file = download_manifest(&reader, &tmp_manifest_name).await?;
-    let tmp_manifest_blob = DataBlob::load(&mut tmp_manifest_file)?;
-    tmp_manifest_blob.verify_crc()?;
+    let download_res = download_manifest(&reader, &tmp_manifest_name).await;
+    let mut tmp_manifest_file = match download_res {
+        Ok(manifest_file) => manifest_file,
+        Err(err) => {
+            match err.downcast_ref::<HttpError>() {
+                Some(HttpError { code, message }) => {
+                    match code {
+                        &StatusCode::NOT_FOUND => {
+                            worker.log(format!("skipping snapshot {} - vanished since start of sync", snapshot));
+                            return Ok(());
+                        },
+                        _ => {
+                            bail!("HTTP error {} - {}", code, message);
+                        },
+                    }
+                },
+                None => {
+                    return Err(err);
+                },
+            };
+        },
+    };
+    let tmp_manifest_blob = DataBlob::load_from_reader(&mut tmp_manifest_file)?;

     if manifest_name.exists() {
         let manifest_blob = proxmox::try_block!({
             let mut manifest_file = std::fs::File::open(&manifest_name)
                 .map_err(|err| format_err!("unable to open local manifest {:?} - {}", manifest_name, err))?;

-            let manifest_blob = DataBlob::load(&mut manifest_file)?;
-            manifest_blob.verify_crc()?;
+            let manifest_blob = DataBlob::load_from_reader(&mut manifest_file)?;
             Ok(manifest_blob)
         }).map_err(|err: Error| {
             format_err!("unable to read local manifest {:?} - {}", manifest_name, err)
@@ -179,8 +224,6 @@ async fn pull_snapshot(

     let manifest = BackupManifest::try_from(tmp_manifest_blob)?;

-    let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, HashMap::new());
-
     for item in manifest.files() {
         let mut path = tgt_store.base_path();
         path.push(snapshot.relative_path());
@@ -221,13 +264,15 @@ async fn pull_snapshot(
             }
         }

+        let mut chunk_reader = RemoteChunkReader::new(reader.clone(), None, item.chunk_crypt_mode(), HashMap::new());
+
         pull_single_archive(
             worker,
             &reader,
             &mut chunk_reader,
             tgt_store.clone(),
             snapshot,
-            &item.filename,
+            &item,
         ).await?;
     }

@@ -252,13 +297,13 @@ pub async fn pull_snapshot_from(
     snapshot: &BackupDir,
 ) -> Result<(), Error> {

-    let (_path, is_new) = tgt_store.create_backup_dir(&snapshot)?;
+    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&snapshot)?;

     if is_new {
         worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));

         if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
-            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
+            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
                 worker.log(format!("cleanup error - {}", cleanup_err));
             }
             return Err(err);
@@ -302,7 +347,16 @@ pub async fn pull_group(
     let mut remote_snapshots = std::collections::HashSet::new();

     for item in list {
-        let backup_time = Utc.timestamp(item.backup_time, 0);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+
+        // in-progress backups can't be synced
+        if let None = item.size {
+            worker.log(format!("skipping snapshot {} - in-progress backup", snapshot));
+            continue;
+        }
+
+        let backup_time = snapshot.backup_time();
+
         remote_snapshots.insert(backup_time);

         if let Some(last_sync_time) = last_sync {
@@ -319,14 +373,12 @@ pub async fn pull_group(
             new_client,
             None,
             src_repo.store(),
-            &item.backup_type,
-            &item.backup_id,
+            snapshot.group().backup_type(),
+            snapshot.group().backup_id(),
             backup_time,
             true,
         ).await?;

-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
-
         pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot).await?;
     }

@@ -336,7 +388,7 @@ pub async fn pull_group(
             let backup_time = info.backup_dir.backup_time();
             if remote_snapshots.contains(&backup_time) { continue; }
             worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
-            tgt_store.remove_backup_dir(&info.backup_dir)?;
+            tgt_store.remove_backup_dir(&info.backup_dir, false)?;
         }
     }

@@ -349,7 +401,7 @@ pub async fn pull_store(
     src_repo: &BackupRepository,
     tgt_store: Arc<DataStore>,
     delete: bool,
-    username: String,
+    userid: Userid,
 ) -> Result<(), Error> {

     // explicit create shared lock to prevent GC on newly created chunks
@@ -380,11 +432,11 @@ pub async fn pull_store(
     for item in list {
         let group = BackupGroup::new(&item.backup_type, &item.backup_id);

-        let owner = tgt_store.create_backup_group(&group, &username)?;
+        let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
         // permission check
-        if owner != username { // only the owner is allowed to create additional snapshots
+        if userid != owner { // only the owner is allowed to create additional snapshots
             worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
-                               item.backup_type, item.backup_id, username, owner));
+                               item.backup_type, item.backup_id, userid, owner));
             errors = true;
             continue; // do not stop here, instead continue
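Returning Err(HttpError::new(status, text)) instead of a bail!-formatted string (see the http client hunks further up) is what makes the 404 special-casing in pull_snapshot possible: the caller can recover the concrete error type with downcast_ref. A compilable sketch with a local stand-in for proxmox's HttpError:

    use anyhow::{bail, Error};

    #[derive(Debug)]
    struct HttpError {
        code: u16,
        message: String,
    }

    impl std::fmt::Display for HttpError {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            write!(f, "HTTP {}: {}", self.code, self.message)
        }
    }

    impl std::error::Error for HttpError {}

    fn download() -> Result<(), Error> {
        Err(Error::from(HttpError { code: 404, message: "not found".into() }))
    }

    fn pull() -> Result<(), Error> {
        match download() {
            Ok(()) => Ok(()),
            Err(err) => match err.downcast_ref::<HttpError>() {
                // a snapshot that vanished since the sync started is not fatal
                Some(HttpError { code: 404, .. }) => Ok(()),
                Some(HttpError { code, message }) => bail!("HTTP error {} - {}", code, message),
                None => Err(err),
            },
        }
    }

    fn main() {
        assert!(pull().is_ok());
    }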
@@ -3,18 +3,20 @@ use std::collections::HashMap;
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};

-use anyhow::Error;
+use anyhow::{bail, Error};

 use super::BackupReader;
-use crate::backup::{AsyncReadChunk, CryptConfig, DataBlob, ReadChunk};
+use crate::backup::{AsyncReadChunk, CryptConfig, CryptMode, DataBlob, ReadChunk};
 use crate::tools::runtime::block_on;

 /// Read chunks from remote host using ``BackupReader``
+#[derive(Clone)]
 pub struct RemoteChunkReader {
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
+    crypt_mode: CryptMode,
     cache_hint: HashMap<[u8; 32], usize>,
-    cache: Mutex<HashMap<[u8; 32], Vec<u8>>>,
+    cache: Arc<Mutex<HashMap<[u8; 32], Vec<u8>>>>,
 }

 impl RemoteChunkReader {
@@ -24,16 +26,20 @@ impl RemoteChunkReader {
     pub fn new(
         client: Arc<BackupReader>,
         crypt_config: Option<Arc<CryptConfig>>,
+        crypt_mode: CryptMode,
         cache_hint: HashMap<[u8; 32], usize>,
     ) -> Self {
         Self {
             client,
             crypt_config,
+            crypt_mode,
             cache_hint,
-            cache: Mutex::new(HashMap::new()),
+            cache: Arc::new(Mutex::new(HashMap::new())),
         }
     }

+    /// Downloads raw chunk. This only verifies the (untrusted) CRC32, use
+    /// DataBlob::verify_unencrypted or DataBlob::decode before storing/processing further.
     pub async fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let mut chunk_data = Vec::with_capacity(4 * 1024 * 1024);

@@ -41,10 +47,22 @@ impl RemoteChunkReader {
             .download_chunk(&digest, &mut chunk_data)
             .await?;

-        let chunk = DataBlob::from_raw(chunk_data)?;
-        chunk.verify_crc()?;
+        let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;

-        Ok(chunk)
+        match self.crypt_mode {
+            CryptMode::Encrypt => {
+                match chunk.crypt_mode()? {
+                    CryptMode::Encrypt => Ok(chunk),
+                    CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
+                }
+            },
+            CryptMode::SignOnly | CryptMode::None => {
+                match chunk.crypt_mode()? {
+                    CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
+                    CryptMode::SignOnly | CryptMode::None => Ok(chunk),
+                }
+            },
+        }
     }
 }

@@ -60,9 +78,7 @@ impl ReadChunk for RemoteChunkReader {

         let chunk = ReadChunk::read_raw_chunk(self, digest)?;

-        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-
-        // fixme: verify digest?
+        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
@@ -92,9 +108,7 @@ impl AsyncReadChunk for RemoteChunkReader {

         let chunk = Self::read_raw_chunk(self, digest).await?;

-        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
-
-        // fixme: verify digest?
+        let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;

         let use_cache = self.cache_hint.contains_key(digest);
         if use_cache {
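The new crypt_mode field lets read_raw_chunk cross-check what the manifest/index claims against what the downloaded chunk actually is, so a compromised server cannot silently swap encrypted chunks for plaintext ones (or vice versa). The decision boils down to "both encrypted or both not"; a reduced sketch:

    #[derive(Clone, Copy, Debug)]
    enum CryptMode {
        None,
        SignOnly,
        Encrypt,
    }

    fn check_chunk(expected: CryptMode, chunk_mode: CryptMode) -> Result<(), String> {
        let expected_encrypted = matches!(expected, CryptMode::Encrypt);
        let chunk_encrypted = matches!(chunk_mode, CryptMode::Encrypt);
        if expected_encrypted != chunk_encrypted {
            return Err("Index and chunk CryptMode don't match.".to_string());
        }
        Ok(())
    }

    fn main() {
        assert!(check_chunk(CryptMode::Encrypt, CryptMode::None).is_err());
        assert!(check_chunk(CryptMode::None, CryptMode::SignOnly).is_ok());
    }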
@@ -15,13 +15,14 @@ use proxmox::try_block;

 use crate::buildcfg;

-pub mod datastore;
-pub mod remote;
-pub mod user;
 pub mod acl;
 pub mod cached_user_info;
+pub mod datastore;
+pub mod jobstate;
 pub mod network;
+pub mod remote;
 pub mod sync;
+pub mod user;

 /// Check configuration directory permissions
 ///
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user