Compare commits
402 Commits
(Commit list: 402 commits, from 3e4a67f350 through f91d58e157. Only the SHA1 column survived extraction; the author, date, and message columns are empty, so the per-commit table is omitted here.)
.gitignore (vendored) — 1 line changed

@@ -3,3 +3,4 @@ local.mak
 **/*.rs.bk
 /etc/proxmox-backup.service
 /etc/proxmox-backup-proxy.service
+build/
Cargo.toml — 17 lines changed

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "0.7.0"
+version = "0.8.16"
 authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
 edition = "2018"
 license = "AGPL-3"
@@ -14,6 +14,7 @@ name = "proxmox_backup"
 path = "src/lib.rs"
 
 [dependencies]
+apt-pkg-native = "0.3.1" # custom patched version
 base64 = "0.12"
 bitflags = "1.2.1"
 bytes = "0.5"
@@ -25,7 +26,7 @@ futures = "0.3"
 h2 = { version = "0.2", features = ["stream"] }
 handlebars = "3.0"
 http = "0.2"
-hyper = "0.13"
+hyper = "0.13.6"
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
@@ -37,12 +38,12 @@ pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-pathpatterns = "0.1.1"
-proxmox = { version = "0.1.42", features = [ "sortable-macro", "api-macro" ] }
+pathpatterns = "0.1.2"
+proxmox = { version = "0.3.5", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "ssh://gitolite3@proxdev.maurer-it.com/rust/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
 proxmox-fuse = "0.1.0"
-pxar = { version = "0.2.0", features = [ "tokio-io", "futures-io" ] }
+pxar = { version = "0.6.0", features = [ "tokio-io", "futures-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
 regex = "1.2"
 rustyline = "6"
@@ -50,11 +51,11 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "0.2.9", features = [ "blocking", "fs", "io-util", "macros", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
+tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
 tokio-openssl = "0.4.0"
 tokio-util = { version = "0.3", features = [ "codec" ] }
 tower-service = "0.3.0"
-udev = "0.3"
+udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
Makefile — 21 lines changed

@@ -40,10 +40,12 @@ COMPILED_BINS := \
 export DEB_VERSION DEB_VERSION_UPSTREAM
 
 SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
+SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
+CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
 
-DEBS=${SERVER_DEB} ${CLIENT_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
 
 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
 
@@ -67,10 +69,12 @@ doc:
 .PHONY: build
 build:
	rm -rf build
+	rm -f debian/control
	debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
	sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
	cat build/debian/control.src build/debian/control.in > build/debian/control
	rm build/debian/control.in build/debian/control.src
+	cp build/debian/control debian/control
	rm build/Cargo.lock
	find build/debian -name "*.hint" -delete
	$(foreach i,$(SUBDIRS), \
@@ -78,18 +82,21 @@
 
 
 .PHONY: proxmox-backup-docs
-proxmox-backup-docs: $(DOC_DEB)
-$(DOC_DEB): build
+$(DOC_DEB) $(DEBS): proxmox-backup-docs
+proxmox-backup-docs: build
	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
	lintian $(DOC_DEB)
 
+# copy the local target/ dir as a build-cache
 .PHONY: deb
-deb: $(DEBS)
-$(DEBS): build
+$(DEBS): deb
+deb: build
	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
	lintian $(DEBS)
 
+.PHONY: deb-all
+deb-all: $(DOC_DEB) $(DEBS)
+
 .PHONY: dsc
 dsc: $(DSC)
 $(DSC): build
@@ -142,5 +149,5 @@ install: $(COMPILED_BINS)
 upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
	# check if working directory is clean
	git diff --exit-code --stat && git diff --exit-code --stat --staged
-	tar cf - ${SERVER_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-	tar cf - ${CLIENT_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve" --dist buster
+	tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+	tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
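To make the restructured packaging targets concrete, here is a hypothetical invocation sketch (the targets exist in the Makefile above; the comments are illustrative assumptions about intent):

    make deb       # binary packages only, docs skipped via --build-profiles=nodoc
    make deb-all   # builds $(DOC_DEB) plus all server/client (and dbgsym) packages

The new `deb-all` target simply depends on both `$(DOC_DEB)` and `$(DEBS)`, so one invocation now produces the documentation and binary packages together.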
debian/changelog (vendored) — 324 lines changed

@@ -1,3 +1,326 @@
(all entries from 0.8.16-1 down to 0.8.0-1 are newly added; the 0.7.0-1 entry at the end is the unchanged context from the top of the old file)

rust-proxmox-backup (0.8.16-1) unstable; urgency=medium

  * BackupDir: make constructor fallible

  * handle invalid mtime when formatting entries

  * ui/docs: add onlineHelp button for syncjobs

  * docs: add section for calendar events

  * tools/systemd/parse_time: enable */x syntax for calendar events

  * docs: set html img width limitation through css

  * docs: use alabaster theme

  * server: set http2 max frame size

  * doc: Add section "FAQ"

 -- Proxmox Support Team <support@proxmox.com>  Fri, 11 Sep 2020 15:54:57 +0200

rust-proxmox-backup (0.8.15-1) unstable; urgency=medium

  * verify: skip benchmark directory

  * add benchmark flag to backup creation for proper cleanup when running
    a benchmark

  * mount: fix mount subcommand

  * ui: only mark backup encrypted if there are any files

  * fix #2983: improve tcp performance

  * improve ui and docs

  * verify: rename corrupted chunks with .bad extension

  * gc: remove .bad files on garbage collect

  * ui: add translation support

  * server/worker_task: fix upid_read_status

  * tools/systemd/time: enable dates for calendarevents

  * server/worker_task: fix 'unknown' status for some big task logs

 -- Proxmox Support Team <support@proxmox.com>  Thu, 10 Sep 2020 09:25:59 +0200

rust-proxmox-backup (0.8.14-1) unstable; urgency=medium

  * verify speed up: use separate IO thread, use datastore-wide cache (instead
    of per group)

  * ui: datastore content: improve encrypted column

  * ui: datastore content: show more granular verify state, especially for
    backup group rows

  * verify: log progress in percent

 -- Proxmox Support Team <support@proxmox.com>  Wed, 02 Sep 2020 09:36:47 +0200

rust-proxmox-backup (0.8.13-1) unstable; urgency=medium

  * improve and add to documentation

  * save last verify result in snapshot manifest and show it in the GUI

  * gc: use human readable units for summary in task log

 -- Proxmox Support Team <support@proxmox.com>  Thu, 27 Aug 2020 16:12:07 +0200

rust-proxmox-backup (0.8.12-1) unstable; urgency=medium

  * verify: speedup - only verify chunks once

  * verify: sort backup groups

  * bump pxar dep to 0.4.0

 -- Proxmox Support Team <support@proxmox.com>  Tue, 25 Aug 2020 08:55:52 +0200

rust-proxmox-backup (0.8.11-1) unstable; urgency=medium

  * improve sync jobs, allow to stop them and better logging

  * fix #2926: make network interfaces parser more flexible

  * fix #2904: zpool status: parse also those vdevs without READ/WRITE/...
    statistics

  * api2/node/services: turn service api calls into workers

  * docs: add sections describing ACL related commands and describing
    benchmarking

  * docs: general grammar, wording and typo improvements

 -- Proxmox Support Team <support@proxmox.com>  Wed, 19 Aug 2020 19:20:03 +0200

rust-proxmox-backup (0.8.10-1) unstable; urgency=medium

  * ui: acl: add improved permission selector

  * services: make reload safer and default to it in gui

  * ui: rework DataStore content Panel

  * ui: add search box to DataStore content

  * ui: DataStoreContent: keep selection and expansion on reload

  * upload_chunk: allow upload of empty blobs

  * fix #2856: also check whole device for device mapper

  * ui: fix error when reloading DataStoreContent

  * ui: fix in-progress snapshots always showing as "Encrypted"

  * update to pxar 0.3 to support negative timestamps

  * fix #2873: if --pattern is used, default to not extracting

  * finish_backup: test/verify manifest at server side

  * finish_backup: add chunk_upload_stats to manifest

  * src/api2/admin/datastore.rs: add API to get/set Notes for backups

  * list_snapshots: Returns new "comment" property (first line from notes)

  * pxar: create: attempt to use O_NOATIME

  * systemd/time: fix weekday wrapping on month

  * pxar: better error handling on extract

  * pxar/extract: fixup path stack for errors

  * datastore: allow browsing signed pxar files

  * GC: use time pre phase1 to calculate min_atime in phase2

  * gui: user: fix #2898 add dialog to set password

  * fix #2909: handle missing chunks gracefully in garbage collection

  * finish_backup: mark backup as finished only after checks have passed

  * fix: master-key: upload RSA encoded key with backup

  * admin-guide: add section explaining master keys

  * backup: only allow finished backups as base snapshot

  * datastore api: only decode unencrypted indices

  * datastore api: verify blob/index csum from manifest

  * sync, blobs and chunk readers: add more checks and verification

  * verify: add more checks, don't fail on first error

  * mark signed manifests as such

  * backup/prune/forget: improve locking

  * backup: ensure base snapshots are still available after backup

 -- Proxmox Support Team <support@proxmox.com>  Tue, 11 Aug 2020 15:37:29 +0200

rust-proxmox-backup (0.8.9-1) unstable; urgency=medium

  * improve termproxy (console) behavior on updating proxmox-backup-server and
    other daemon restarts

  * client: improve upload log output and speed calculation

  * fix #2885: client upload: bail on duplicate backup targets

 -- Proxmox Support Team <support@proxmox.com>  Fri, 24 Jul 2020 11:24:07 +0200

rust-proxmox-backup (0.8.8-1) unstable; urgency=medium

  * pxar: .pxarexclude: match behavior from absolute paths to the one described
    in the documentation and use byte based paths

  * catalog shell: add exit command

  * manifest: revert signature canonicalization to old behaviour. Fallout from
    encrypted older backups is expected and was ignored due to the beta status
    of Proxmox Backup.

  * documentation: various improvements and additions

  * cached user info: print privilege path in error message

  * docs: fix #2851 Add note about GC grace period

  * api2/status: fix datastore full estimation bug if there were (almost) no
    changes for several days

  * schedules, calendar event: support the 'weekly' special expression

  * ui: sync job: group remote fields and use "Source" in labels

  * ui: add calendar event selector

  * ui: sync job: change default to false for "remove-vanished" for new jobs

  * fix #2860: skip in-progress snapshots when syncing

  * fix #2865: detect and skip vanished snapshots

  * fix #2871: close FDs when scanning backup group, avoid leaking

  * backup: list images: handle walkdir error, catch "lost+found" special
    directory

  * implement AsyncSeek for AsyncIndexReader

  * client: rework logging upload info like size or bandwidth

  * client writer: do not output chunklist for now on verbose=true

  * add initial API for listing available updates and updating the APT
    database

  * ui: add xterm.js console implementation

 -- Proxmox Support Team <support@proxmox.com>  Thu, 23 Jul 2020 12:16:05 +0200

rust-proxmox-backup (0.8.7-2) unstable; urgency=medium

  * support restoring file attributes from pxar archives

  * docs: additions and fixes

  * ui: running tasks: update limit to 100

 -- Proxmox Support Team <support@proxmox.com>  Tue, 14 Jul 2020 12:05:25 +0200

rust-proxmox-backup (0.8.6-1) unstable; urgency=medium

  * ui: add button for easily showing the server fingerprint dashboard

  * proxmox-backup-client benchmark: add --verbose flag and improve output
    format

  * docs: reference PDF variant in HTML output

  * proxmox-backup-client: add simple version command

  * improve keyfile and signature handling in catalog and manifest

 -- Proxmox Support Team <support@proxmox.com>  Fri, 10 Jul 2020 11:34:14 +0200

rust-proxmox-backup (0.8.5-1) unstable; urgency=medium

  * fix cross process task listing

  * docs: expand datastore documentation

  * docs: add remotes and sync-jobs and schedules

  * bump pathpatterns to 0.1.2

  * ui: align version and user-menu spacing with pve/pmg

  * ui: make username a menu-button

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 15:32:39 +0200

rust-proxmox-backup (0.8.4-1) unstable; urgency=medium

  * add TaskButton in header

  * simpler lost+found pattern

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 14:28:24 +0200

rust-proxmox-backup (0.8.3-1) unstable; urgency=medium

  * get_disks: don't fail on zfs_devices

  * allow some more characters for zpool list

  * ui: adapt for new sign-only crypt mode

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 13:55:06 +0200

rust-proxmox-backup (0.8.2-1) unstable; urgency=medium

  * buildsys: also upload debug packages

  * src/backup/manifest.rs: rename into_string -> to_string

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 11:58:51 +0200

rust-proxmox-backup (0.8.1-1) unstable; urgency=medium

  * remove authenticated data blobs (not needed)

  * add signature to manifest

  * improve docs

  * client: introduce --keyfd parameter

  * ui improvements

 -- Proxmox Support Team <support@proxmox.com>  Thu, 09 Jul 2020 10:01:25 +0200

rust-proxmox-backup (0.8.0-1) unstable; urgency=medium

  * implement get_runtime_with_builder

 -- Proxmox Support Team <support@proxmox.com>  Tue, 07 Jul 2020 10:15:26 +0200

 rust-proxmox-backup (0.7.0-1) unstable; urgency=medium
 
  * implement clone for RemoteChunkReader
@@ -171,4 +494,3 @@ proxmox-backup (0.1-1) unstable; urgency=medium
  * first try
 
  -- Proxmox Support Team <support@proxmox.com>  Fri, 30 Nov 2018 13:03:28 +0100
-
debian/control (vendored) — 133 lines (new file)

@@ -0,0 +1,133 @@
Source: rust-proxmox-backup
Section: admin
Priority: optional
Build-Depends: debhelper (>= 11),
 dh-cargo (>= 18),
 cargo:native,
 rustc:native,
 libstd-rust-dev,
 librust-anyhow-1+default-dev,
 librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
 librust-base64-0.12+default-dev,
 librust-bitflags-1+default-dev (>= 1.2.1-~~),
 librust-bytes-0.5+default-dev,
 librust-chrono-0.4+default-dev,
 librust-crc32fast-1+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.2+default-dev,
 librust-h2-0.2+stream-dev,
 librust-handlebars-3+default-dev,
 librust-http-0.2+default-dev,
 librust-hyper-0.13+default-dev,
 librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev,
 librust-nix-0.16+default-dev,
 librust-nom-5+default-dev (>= 5.1-~~),
 librust-num-traits-0.2+default-dev,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
 librust-openssl-0.10+default-dev,
 librust-pam-0.7+default-dev,
 librust-pam-sys-0.5+default-dev,
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-utils-0.1+default-dev,
 librust-proxmox-0.3+api-macro-dev (>= 0.3.5-~~),
 librust-proxmox-0.3+default-dev (>= 0.3.5-~~),
 librust-proxmox-0.3+sortable-macro-dev (>= 0.3.5-~~),
 librust-proxmox-0.3+websocket-dev (>= 0.3.5-~~),
 librust-proxmox-fuse-0.1+default-dev,
 librust-pxar-0.6+default-dev,
 librust-pxar-0.6+futures-io-dev,
 librust-pxar-0.6+tokio-io-dev,
 librust-regex-1+default-dev (>= 1.2-~~),
 librust-rustyline-6+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
 librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
 librust-tokio-0.2+default-dev (>= 0.2.9-~~),
 librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
 librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
 librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
 librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
 librust-tokio-0.2+process-dev (>= 0.2.9-~~),
 librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
 librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
 librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
 librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
 librust-tokio-0.2+time-dev (>= 0.2.9-~~),
 librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
 librust-tokio-openssl-0.4+default-dev,
 librust-tokio-util-0.3+codec-dev,
 librust-tokio-util-0.3+default-dev,
 librust-tower-service-0.3+default-dev,
 librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
 librust-xdg-2+default-dev (>= 2.2-~~),
 librust-zstd-0.4+bindgen-dev,
 librust-zstd-0.4+default-dev,
 libacl1-dev,
 libfuse3-dev,
 libsystemd-dev,
 uuid-dev,
 debhelper (>= 12~),
 bash-completion,
 python3-docutils,
 python3-pygments,
 rsync,
 fonts-dejavu-core <!nodoc>,
 fonts-lato <!nodoc>,
 fonts-open-sans <!nodoc>,
 graphviz <!nodoc>,
 latexmk <!nodoc>,
 python3-sphinx <!nodoc>,
 texlive-fonts-extra <!nodoc>,
 texlive-fonts-recommended <!nodoc>,
 texlive-xetex <!nodoc>,
 xindy <!nodoc>
Maintainer: Proxmox Support Team <support@proxmox.com>
Standards-Version: 4.4.1
Vcs-Git:
Vcs-Browser:
Homepage: https://www.proxmox.com

Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
         libjs-extjs (>= 6.0.1),
         libzstd1 (>= 1.3.8),
         lvm2,
         pbs-i18n,
         proxmox-backup-docs,
         proxmox-mini-journalreader,
         proxmox-widget-toolkit (>= 2.2-4),
         pve-xtermjs (>= 4.7.0-1),
         smartmontools,
         ${misc:Depends},
         ${shlibs:Depends},
Recommends: zfsutils-linux,
Description: Proxmox Backup Server daemon with tools and GUI
 This package contains the Proxmox Backup Server daemons and related
 tools. This includes a web-based graphical user interface.

Package: proxmox-backup-client
Architecture: any
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: Proxmox Backup Client tools
 This package contains the Proxmox Backup client, which provides a
 simple command line tool to create and restore backups.

Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: libjs-extjs,
         ${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation
 This package contains the Proxmox Backup Documentation files.
debian/control.in (vendored) — 2 lines changed

@@ -4,9 +4,11 @@ Depends: fonts-font-awesome,
          libjs-extjs (>= 6.0.1),
          libzstd1 (>= 1.3.8),
          lvm2,
+         pbs-i18n,
          proxmox-backup-docs,
          proxmox-mini-journalreader,
          proxmox-widget-toolkit (>= 2.2-4),
+         pve-xtermjs (>= 4.7.0-1),
          smartmontools,
          ${misc:Depends},
          ${shlibs:Depends},
debian/lintian-overrides (vendored) — 2 lines (new file)

@@ -0,0 +1,2 @@
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
debian/postinst (vendored) — 6 lines changed

@@ -14,6 +14,12 @@ case "$1" in
 			_dh_action=start
 		fi
 		deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
+
+		# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
+		if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
+			echo "Fixing up termproxy user id in task log..."
+			flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
+		fi
 		;;
 
 	abort-upgrade|abort-remove|abort-deconfigure)
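The grep/sed pair added above rewrites old task-log entries whose termproxy worker ID lacks a realm suffix. As a hypothetical illustration (the surrounding UPID fields are elided, not shown in the source), an active-task line of the form

    ...:termproxy::root: ...

is rewritten in place to

    ...:termproxy::root@pam: ...

so the user ID carries its @pam realm, with flock guarding against concurrent writers of the active file.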
debian/proxmox-backup-docs.links (vendored) — 1 line (new file)

@@ -0,0 +1 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
debian/proxmox-backup-server.install (vendored) — 1 line changed

@@ -1,6 +1,7 @@
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
 etc/proxmox-backup-banner.service /lib/systemd/system/
+etc/pbstest-beta.list /etc/apt/sources.list.d/
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
docs/Makefile — changed (file header lost in extraction; name inferred from content)

@@ -28,7 +28,6 @@ COMPILEDIR := ../target/debug
 SPHINXOPTS += -t devbuild
 endif
 
-
 # Sphinx internal variables.
 ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
 
@@ -68,9 +67,17 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
 proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
	rst2man $< >$@
 
+.PHONY: onlinehelpinfo
+onlinehelpinfo:
+	@echo "Generating OnlineHelpInfo.js..."
+	$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
+	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
+
 .PHONY: html
 html: ${GENERATED_SYNOPSIS}
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
+	cp custom.css $(BUILDDIR)/html/_static/
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
 
docs/_ext/proxmox-scanrefs.py — 133 lines (new file; Python indentation restored, which the extraction had flattened)

@@ -0,0 +1,133 @@
#!/usr/bin/env python3

# debugging stuff
from pprint import pprint

from typing import cast

import json
import re

import os
import io
from docutils import nodes

from sphinx.builders import Builder
from sphinx.util import logging

logger = logging.getLogger(__name__)

# refs are added in the following manner before the title of a section (note underscore and newline before title):
# .. _my-label:
#
# Section to ref
# --------------
#
#
# then referred to like (note missing underscore):
# "see :ref:`my-label`"
#
# the benefit of using this is if a label is explicitly set for a section,
# we can refer to it with this anchor #my-label in the html,
# even if the section name changes.
#
# see https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-ref

def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize later
    js_files = []
    used_anchors = []
    logger.info("scanning extjs files for onlineHelp definitions")
    for root, dirs, files in os.walk("{}".format(wwwdir)):
        #print(root, dirs, files)
        for filename in files:
            if filename.endswith('.js'):
                js_files.append(os.path.join(root, filename))
    for js_file in js_files:
        fd = open(js_file).read()
        match = re.search("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd) # match object is tuple
        if match:
            anchor = match.groups()[0]
            anchor = re.sub('_', '-', anchor) # normalize labels
            logger.info("found onlineHelp: {} in {}".format(anchor, js_file))
            used_anchors.append(anchor)
    return used_anchors


def setup(app):
    logger.info('Mapping reference labels...')
    app.add_builder(ReflabelMapper)
    return {
        'version': '0.1',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }

class ReflabelMapper(Builder):
    name = 'proxmox-scanrefs'

    def init(self):
        self.docnames = []
        self.env.online_help = {}
        self.env.online_help['pbs_documentation_index'] = {
            'link': '/docs/index.html',
            'title': 'Proxmox Backup Server Documentation Index',
        }
        self.env.used_anchors = scan_extjs_files()

        if not os.path.isdir(self.outdir):
            os.mkdir(self.outdir)

        self.output_filename = os.path.join(self.outdir, 'OnlineHelpInfo.js')
        self.output = io.open(self.output_filename, 'w', encoding='UTF-8')

    def write_doc(self, docname, doctree):
        for node in doctree.traverse(nodes.section):
            #pprint(vars(node))

            if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
                filename = self.env.doc2path(docname)
                filename_html = re.sub('.rst', '.html', filename)
                labelid = node['ids'][1] # [0] is predefined by sphinx, we need [1] for explicit ones
                title = cast(nodes.title, node[0])
                logger.info('traversing section {}'.format(title.astext()))
                ref_name = getattr(title, 'rawsource', title.astext())

                self.env.online_help[labelid] = {'link': '', 'title': ''}
                self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
                self.env.online_help[labelid]['title'] = ref_name

        return


    def get_outdated_docs(self):
        return 'all documents'

    def prepare_writing(self, docnames):
        return

    def get_target_uri(self, docname, typ=None):
        return ''

    def validate_anchors(self):
        #pprint(self.env.online_help)
        to_remove = []
        for anchor in self.env.used_anchors:
            if anchor not in self.env.online_help:
                logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
        for anchor in self.env.online_help:
            if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
                logger.info("[*] anchor {} not used! deleting...".format(anchor))
                to_remove.append(anchor)
        for anchor in to_remove:
            self.env.online_help.pop(anchor, None)
        return

    def finish(self):
        # generate OnlineHelpInfo.js output
        self.validate_anchors()

        self.output.write("const proxmoxOnlineHelpInfo = ")
        self.output.write(json.dumps(self.env.online_help, indent=2))
        self.output.write(";\n")
        self.output.close()
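To make the builder's output concrete: finish() serializes self.env.online_help as JSON into OnlineHelpInfo.js. A sketch of the generated file might look like the following, assuming a single explicitly labeled section is found (the first entry is hard-coded in init(); the second is a hypothetical example based on the calendar-events label added elsewhere in this changeset):

    const proxmoxOnlineHelpInfo = {
      "pbs_documentation_index": {
        "link": "/docs/index.html",
        "title": "Proxmox Backup Server Documentation Index"
      },
      "calendar-events": {
        "link": "/docs/calendarevents.html#calendar-events",
        "title": "Calendar Events"
      }
    };

The web UI can then look up an onlineHelp key and open the matching documentation anchor.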
docs/calendarevents.rst — 100 lines (new file)

@@ -0,0 +1,100 @@

.. _calendar-events:

Calendar Events
===============

Introduction and Format
-----------------------

Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a format inspired
by the systemd Time and Date Specification (see `systemd.time manpage`_)
called `calendar events` for its schedules.

`Calendar events` are expressions to specify one or more points in time.
They are mostly compatible with systemd's calendar events.

The general format is as follows:

.. code-block:: console
  :caption: Calendar event

  [WEEKDAY] [[YEARS-]MONTHS-DAYS] [HOURS:MINUTES[:SECONDS]]

Note that there has to be at least a weekday, date or time part.
If the weekday or date part is omitted, all (week)days are included.
If the time part is omitted, the time 00:00:00 is implied.
(e.g. '2020-01-01' refers to '2020-01-01 00:00:00')

Weekdays are specified with the abbreviated English version:
`mon, tue, wed, thu, fri, sat, sun`.

Each field can contain multiple values in the following formats:

* comma-separated: e.g., 01,02,03
* as a range: e.g., 01..10
* as a repetition: e.g., 05/10 (means starting at 5, every 10)
* and a combination of the above: e.g., 01,05..10,12/02
* or a `*` for every possible value: e.g., \*:00

There are some special values that have a specific meaning:

=================================  ==============================
Value                              Syntax
=================================  ==============================
`minutely`                         `*-*-* *:*:00`
`hourly`                           `*-*-* *:00:00`
`daily`                            `*-*-* 00:00:00`
`weekly`                           `mon *-*-* 00:00:00`
`monthly`                          `*-*-01 00:00:00`
`yearly` or `annually`             `*-01-01 00:00:00`
`quarterly`                        `*-01,04,07,10-01 00:00:00`
`semiannually` or `semi-annually`  `*-01,07-01 00:00:00`
=================================  ==============================


Here is a table with some useful examples:

========================  =============================  ===================================
Example                   Alternative                    Explanation
========================  =============================  ===================================
`mon,tue,wed,thu,fri`     `mon..fri`                     Every working day at 00:00
`sat,sun`                 `sat..sun`                     Only on weekends at 00:00
`mon,wed,fri`             --                             Monday, Wednesday, Friday at 00:00
`12:05`                   --                             Every day at 12:05 PM
`*:00/5`                  `0/1:0/5`                      Every five minutes
`mon..wed *:30/10`        `mon,tue,wed *:30/10`          Monday, Tuesday, Wednesday 30, 40 and 50 minutes after every full hour
`mon..fri 8..17,22:0/15`  --                             Every working day every 15 minutes between 8 AM and 6 PM and between 10 PM and 11 PM
`fri 12..13:5/20`         `fri 12,13:5/20`               Friday at 12:05, 12:25, 12:45, 13:05, 13:25 and 13:45
`12,14,16,18,20,22:5`     `12/2:5`                       Every day starting at 12:05 until 22:05, every 2 hours
`*:*`                     `0/1:0/1`                      Every minute (minimum interval)
`*-05`                    --                             On the 5th day of every month
`Sat *-1..7 15:00`        --                             First Saturday each month at 15:00
`2015-10-21`              --                             21st October 2015 at 00:00
========================  =============================  ===================================


Differences to systemd
----------------------

Not all features of systemd calendar events are implemented:

* no Unix timestamps (e.g. `@12345`): instead use date and time to specify
  a specific point in time
* no timezone: all schedules use the timezone set on the server
* no sub-second resolution
* no reverse day syntax (e.g. 2020-03~01)
* no repetition of ranges (e.g. 1..10/2)

Notes on scheduling
-------------------

In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks once every minute whether any
job schedules are due. This means that even though `calendar events` can
contain seconds, schedules are effectively evaluated at minute granularity.

Also, all schedules are checked against the timezone set on the
`Proxmox Backup`_ server.
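The matching model described above (independent weekday and time fields, each holding a set of allowed values) can be sketched in Rust, the project's implementation language. This is an illustrative sketch only, not proxmox-backup's actual tools/systemd/time code; all type and field names are assumptions:

    use std::collections::BTreeSet;

    /// One parsed calendar event; an empty set stands for `*` (every value).
    struct CalendarEvent {
        weekdays: BTreeSet<u8>, // 0 = mon .. 6 = sun
        hours: BTreeSet<u8>,
        minutes: BTreeSet<u8>,
    }

    impl CalendarEvent {
        fn field_matches(allowed: &BTreeSet<u8>, value: u8) -> bool {
            allowed.is_empty() || allowed.contains(&value)
        }

        /// True if the event fires at the given weekday/hour/minute.
        fn matches(&self, weekday: u8, hour: u8, minute: u8) -> bool {
            Self::field_matches(&self.weekdays, weekday)
                && Self::field_matches(&self.hours, hour)
                && Self::field_matches(&self.minutes, minute)
        }
    }

    fn main() {
        // "mon..wed *:30/10" -> mon, tue, wed; every hour; minutes 30, 40, 50
        let event = CalendarEvent {
            weekdays: (0..=2).collect(),                     // mon..wed
            hours: BTreeSet::new(),                          // `*`
            minutes: [30, 40, 50].iter().copied().collect(), // 30/10
        };
        assert!(event.matches(2, 9, 40));  // Wednesday 09:40 fires
        assert!(!event.matches(5, 9, 40)); // Saturday does not
        println!("ok");
    }

A scheduler that, like the proxy daemon, wakes up once a minute only needs this kind of membership test against the current minute.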
docs/conf.py — 40 lines changed

@@ -18,9 +18,12 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
 import os
-# import sys
+import sys
 # sys.path.insert(0, os.path.abspath('.'))
 
+# custom extensions
+sys.path.append(os.path.abspath("./_ext"))
+
 # -- Implement custom formatter for code-blocks ---------------------------
 #
 # * use smaller font
@@ -46,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 
-extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo"]
+extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
 
 todo_link_only = True
 
@@ -71,7 +74,7 @@ rst_epilog = epilog_file.read()
 
 # General information about the project.
 project = 'Proxmox Backup'
-copyright = '2019-2020, Proxmox Support Team'
+copyright = '2019-2020, Proxmox Server Solutions GmbH'
 author = 'Proxmox Support Team'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -112,7 +115,7 @@ exclude_patterns = [
	'pxar/man1.rst',
	'epilog.rst',
	'pbs-copyright.rst',
-	'sysadmin.rst',
	'local-zfs.rst'
+	'package-repositories.rst',
 ]
 
@@ -145,7 +148,7 @@ pygments_style = 'sphinx'
 # keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = True
+todo_include_todos = not tags.has('release')
 
 
 # -- Options for HTML output ----------------------------------------------
@@ -153,13 +156,32 @@ todo_include_todos = True
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinxdoc'
+html_theme = 'alabaster'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
 #
 # html_theme_options = {}
+html_theme_options = {
+    'fixed_sidebar': True,
+    #'sidebar_includehidden': False,
+    'sidebar_collapse': False, # FIXME: documented, but does not work?!
+    'show_relbar_bottom': True, # FIXME: documented, but does not work?!
+    'show_powered_by': False,
+
+    'logo': 'proxmox-logo.svg',
+    'logo_name': True, # show project name below logo
+    #'logo_text_align': 'center',
+    #'description': 'Fast, Secure & Efficient.',
+
+    'sidebar_width': '300px',
+    'page_width': '1280px',
+    # font styles
+    'head_font_family': 'Lato, sans-serif',
+    'caption_font_family': 'Lato, sans-serif',
+    'caption_font_size': '20px',
+    'font_family': 'Open Sans, sans-serif',
+}
 
 # Add any paths that contain custom themes here, relative to this directory.
 # html_theme_path = []
@@ -176,7 +198,7 @@ html_theme = 'alabaster'
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
 #
-html_logo = 'images/proxmox-logo.svg'
+#html_logo = 'images/proxmox-logo.svg' # replaced by html_theme_options.logo
 
 # The name of an image file (relative to this directory) to use as a favicon of
 # the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
@@ -229,7 +251,7 @@ html_static_path = ['_static']
 
 # If true, links to the reST sources are added to the pages.
 #
-# html_show_sourcelink = True
+html_show_sourcelink = False
 
 # If true, "Created using Sphinx" is shown in the HTML footer.  Default is True.
 #
docs/custom.css — 15 lines (new file)

@@ -0,0 +1,15 @@
div.sphinxsidebar {
    height: calc(100% - 20px);
    overflow: auto;
}

h1.logo-name {
    font-size: 24px;
}

div.body img {
    width: 250px;
}
pre {
    padding: 5px 10px;
}
docs/epilog.rst — changed (file header lost in extraction; name inferred from content)

@@ -11,8 +11,11 @@
 .. _Container: https://en.wikipedia.org/wiki/Container_(virtualization)
 .. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
 .. _Proxmox: https://www.proxmox.com
+.. _Proxmox Community Forum: https://forum.proxmox.com
 .. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
-.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup
+// FIXME
+.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
+.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
 .. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
 .. _Rust: https://www.rust-lang.org/
 .. _SHA-256: https://en.wikipedia.org/wiki/SHA-2
@@ -35,3 +38,6 @@
 .. _RFC3339: https://tools.ietf.org/html/rfc3339
 .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
 .. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
+
+.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
+
docs/faq.rst — 71 lines (new file)

@@ -0,0 +1,71 @@
FAQ
===

What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------

Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.


Which platforms are supported as a backup source (client)?
----------------------------------------------------------

The client tool works on most modern Linux systems, meaning you are not limited
to Debian-based backups.


Will Proxmox Backup Server run on a 32-bit processor?
-----------------------------------------------------

Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.


How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------

+-----------------------+--------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version     | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+====================+===============+============+====================+
|Proxmox Backup 1.x     | Debian 10 (Buster) | tba           | tba        | tba                |
+-----------------------+--------------------+---------------+------------+--------------------+


Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------

Proxmox Backup Server allows you to copy or synchronize datastores to other
locations, through the use of *Remotes* and *Sync Jobs*. *Remote* is the term
given to a separate server, which has a datastore that can be synced to a local store.
A *Sync Job* is the process which is used to pull the contents of a datastore from
a *Remote* to a local datastore.


Can Proxmox Backup Server verify data integrity of a backup archive?
--------------------------------------------------------------------

Proxmox Backup Server uses a built-in SHA-256 checksum algorithm to ensure
data integrity. Within each backup, a manifest file (index.json) is created,
which contains a list of all the backup files, along with their sizes and
checksums. This manifest file is used to verify the integrity of each backup.


When backing up to remote servers, do I have to trust the remote server?
------------------------------------------------------------------------

Proxmox Backup Server supports client-side encryption, meaning your data is
encrypted before it reaches the server. Thus, in the event that an attacker
gains access to the server, they will not be able to read the data.

.. note:: Encryption is not enabled by default. To set up encryption, see the
  `Encryption
  <https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
  of the Proxmox Backup Server Administration Guide.


Is the backup incremental/deduplicated?
---------------------------------------

With Proxmox Backup Server, backups are sent incrementally and data is
deduplicated on the server.
This minimizes both the storage consumed and the network impact.
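The manifest-based integrity check described in the FAQ boils down to hashing each archive and comparing against the checksum stored in index.json. A minimal Rust sketch of that idea, assuming a simplified manifest entry (not the exact index.json schema) and using the sha2 and hex crates:

    use sha2::{Digest, Sha256};
    use std::fs;

    /// Compare a file's SHA-256 digest against the hex checksum from the manifest.
    fn verify_csum(path: &str, expected_hex: &str) -> std::io::Result<bool> {
        let data = fs::read(path)?;               // read the whole archive
        let digest = Sha256::digest(&data);       // compute SHA-256
        Ok(hex::encode(digest) == expected_hex.to_lowercase())
    }

    fn main() -> std::io::Result<()> {
        // Hypothetical manifest entry (illustrative, not the real schema):
        //   { "filename": "root.pxar.didx", "size": 1234, "csum": "9f86d0..." }
        let ok = verify_csum(
            "root.pxar.didx",
            "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
        )?;
        println!("checksum {}", if ok { "OK" } else { "MISMATCH" });
        Ok(())
    }

Since the manifest itself can be signed (see the 0.8.1 changelog entry above), a client can establish trust in the checksum list before trusting the individual files.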
docs/glossary.rst — changed (file header lost in extraction; name inferred from content)

@@ -16,7 +16,7 @@ Glossary
    Datastore
 
       A place to store backups. A directory which contains the backup data.
-      The current implemenation is file-system based.
+      The current implementation is file-system based.
 
    `Rust`_
 
@@ -46,3 +46,8 @@
       kernel driver handles filesystem requests and sends them to a
       userspace application.
 
+   Remote
+
+      A remote Proxmox Backup Server installation and credentials for a user on it.
+      You can pull datastores from a remote to a local datastore in order to
+      have redundant backups.
New binary files (screenshots added under docs/images/screenshots/):

  pbs-gui-datastore-create-general.png   (18 KiB)
  pbs-gui-datastore.png                  (60 KiB)
  pbs-gui-disks-dir-create.png           (12 KiB)
  pbs-gui-disks-zfs-create.png           (43 KiB)
  pbs-gui-disks.png                      (79 KiB)
  pbs-gui-network-create-bond.png        (26 KiB)
  pbs-gui-permissions-add.png            (14 KiB)
  pbs-gui-remote-add.png                 (20 KiB)
  pbs-gui-syncjob-add.png                (21 KiB)
  pbs-gui-user-management-add-user.png   (18 KiB)
  pbs-gui-user-management.png            (54 KiB)
docs/index.rst — changed (file header lost in extraction; name inferred from content)

@@ -12,6 +12,10 @@ Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included
 in the section entitled "GNU Free Documentation License".
 
 
+.. only:: html
+
+ A `PDF` version of the documentation is `also available here <./proxmox-backup.pdf>`_
+
 .. toctree::
    :maxdepth: 3
    :caption: Table of Contents
@@ -19,6 +23,8 @@ in the section entitled "GNU Free Documentation License".
    introduction.rst
    installation.rst
    administration-guide.rst
+   sysadmin.rst
+   faq.rst
 
 .. raw:: latex
 
@@ -31,6 +37,7 @@ in the section entitled "GNU Free Documentation License".
    command-syntax.rst
    file-formats.rst
    backup-protocol.rst
+   calendarevents.rst
    glossary.rst
    GFDL.rst
 
@@ -44,4 +51,3 @@ in the section entitled "GNU Free Documentation License".
 
 
 * :ref:`genindex`
-
docs/installation.rst — changed (file header lost in extraction; name inferred from content)

@@ -19,9 +19,9 @@ for various management tasks such as disk management.
 The disk image (ISO file) provided by Proxmox includes a complete Debian system
 ("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
 
-The installer will guide you through the setup process and allows
+The installer will guide you through the setup process and allow
 you to partition the local disk(s), apply basic system configurations
-(e.g. timezone, language, network), and installs all required packages.
+(e.g. timezone, language, network), and install all required packages.
 The provided ISO will get you started in just a few minutes, and is the
 recommended method for new and existing users.
 
@@ -36,11 +36,11 @@ It includes the following:
 
 * The `Proxmox Backup`_ server installer, which partitions the local
   disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
-  system.
+  system
 
 * Complete operating system (Debian Linux, 64-bit)
 
-* Our Linux kernel with ZFS support.
+* Our Linux kernel with ZFS support
 
 * Complete tool-set to administer backups and all necessary resources
 
@@ -54,7 +54,7 @@ Install `Proxmox Backup`_ server on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Proxmox ships as a set of Debian packages which can be installed on top of a
-standard Debian installation. After configuring the
+standard Debian installation. After configuring the
 :ref:`sysadmin_package_repositories`, you need to run:
 
 .. code-block:: console
@@ -76,12 +76,15 @@ does, please use the following:
 This will install all required packages, the Proxmox kernel with ZFS_
 support, and a set of common and useful packages.
 
-Installing `Proxmox Backup`_ on top of an existing Debian_ installation looks easy, but
-it presumes that the base system and local storage has been set up correctly.
+.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
+  installation looks easy, but it assumes that the base system and local
+  storage have been set up correctly. In general this is not trivial, especially
+  when LVM_ or ZFS_ is used. The network configuration is completely up to you
+  as well.
 
-In general this is not trivial, especially when LVM_ or ZFS_ is used.
-
-The network configuration is completely up to you as well.
+.. note:: You can access the webinterface of the Proxmox Backup Server with
+  your web browser, using HTTPS on port 8007. For example at
+  ``https://<ip-or-dns-name>:8007``
 
 Install Proxmox Backup server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -99,6 +102,10 @@ After configuring the
 server to store backups. Should the hypervisor server fail, you can
 still access the backups.
 
+.. note::
+  You can access the webinterface of the Proxmox Backup Server with your web
+  browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
+
 Client installation
 -------------------
 
@@ -1,33 +1,29 @@
 Introduction
 ============

 This documentation is written in :term:`reStructuredText` and formatted with
 :term:`Sphinx`.


 What is Proxmox Backup Server
 -----------------------------

-Proxmox Backup Server is an enterprise-class client-server backup software that
-backups :term:`virtual machine`\ s, :term:`container`\ s, and physical hosts.
-It is specially optimized for the `Proxmox Virtual Environment`_ platform and
-allows you to backup your data securely, even between remote sites, providing
-easy management with a web-based user interface.
+Proxmox Backup Server is an enterprise-class, client-server backup software
+package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
+physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
+platform and allows you to back up your data securely, even between remote
+sites, providing easy management with a web-based user interface.

 Proxmox Backup Server supports deduplication, compression, and authenticated
-encryption (AE_). Using :term:`Rust` as implementation language guarantees high
-performance, low resource usage, and a safe, high quality code base.
+encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
+performance, low resource usage, and a safe, high-quality codebase.

-It features strong encryption done on the client side. Thus, it's possible to
-backup data to not fully trusted targets.
+It features strong client-side encryption. Thus, it's possible to
+backup data to targets that are not fully trusted.


 Architecture
 ------------

 Proxmox Backup Server uses a `client-server model`_. The server stores the
-backup data and provides an API to create backups and restore data. With the
-API it's also possible to manage disks and other server side resources.
+backup data and provides an API to create and manage datastores. With the
+API, it's also possible to manage disks and other server-side resources.

 The backup client uses this API to access the backed up data. With the command
 line tool ``proxmox-backup-client`` you can create backups and restore data.

@@ -36,7 +32,7 @@ For QEMU_ with `Proxmox Virtual Environment`_ we deliver an integrated client.

 A single backup is allowed to contain several archives. For example, when you
 backup a :term:`virtual machine`, each disk is stored as a separate archive
 inside that backup. The VM configuration itself is stored as an extra file.
-This way, it is easy to access and restore only important parts of the backup
+This way, it's easy to access and restore only important parts of the backup,
 without the need to scan the whole backup.


@@ -48,46 +44,47 @@ Main Features
 :term:`container`\ s.

 :Performance: The whole software stack is written in :term:`Rust`,
-  to provide high speed and memory efficiency.
+  in order to provide high speed and memory efficiency.

 :Deduplication: Periodic backups produce large amounts of duplicate
-  data. The deduplication layer avoids redundancy and minimizes the used
-  storage space.
+  data. The deduplication layer avoids redundancy and minimizes the storage
+  space used.

 :Incremental backups: Changes between backups are typically low. Reading and
-  sending only the delta reduces storage and network impact of backups.
+  sending only the delta reduces the storage and network impact of backups.

-:Data Integrity: The built in `SHA-256`_ checksum algorithm assures the
-  accuracy and consistency of your backups.
+:Data Integrity: The built-in `SHA-256`_ checksum algorithm ensures accuracy and
+  consistency in your backups.

 :Remote Sync: It is possible to efficiently synchronize data to remote
   sites. Only deltas containing new data are transferred.

-:Compression: The ultra fast Zstandard_ compression is able to compress
+:Compression: The ultra-fast Zstandard_ compression is able to compress
   several gigabytes of data per second.

-:Encryption: Backups can be encrypted on the client-side using AES-256 in
-  GCM_ mode. This authenticated encryption mode (AE_) provides very
-  high performance on modern hardware.
+:Encryption: Backups can be encrypted on the client-side, using AES-256 in
+  Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
+  provides very high performance on modern hardware.

-:Web interface: Manage Proxmox backups with the integrated web-based user
-  interface.
+:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
+  user interface.

 :Open Source: No secrets. Proxmox Backup Server is free and open-source
-  software. The source code is licensed under AGPL, v3.
+  software. The source code is licensed under AGPL, v3.

-:Support: Enterprise support is available from `Proxmox`_.
+:Support: Enterprise support will be available from `Proxmox`_ once the beta
+  phase is over.


 Reasons for Data Backup?
 ------------------------

 The main purpose of a backup is to protect against data loss. Data loss can be
-caused by faulty hardware but also by human error.
+caused by both faulty hardware and human error.

 A common mistake is to accidentally delete a file or folder which is still
-required. Virtualization can even amplify this problem; it easily happens that
-a whole virtual machine is deleted by just pressing a single button.
+required. Virtualization can even amplify this problem, as deleting a whole
+virtual machine can be as easy as pressing a single button.

 For administrators, backups can serve as a useful toolkit for temporarily
 storing data. For example, it is common practice to perform full backups before

@@ -105,8 +102,57 @@ Therefore, ensure that you perform regular backups and run restore tests.
 Software Stack
 --------------

+.. todo:: Explain why we use Rust (and Flutter)
+
+Proxmox Backup Server consists of multiple components:
+
+* A server-daemon providing, among other things, a RESTful API, super-fast
+  asynchronous tasks, lightweight usage statistic collection, scheduling
+  events, strict separation of privileged and unprivileged execution
+  environments
+* A JavaScript management web interface
+* A management CLI tool for the server (`proxmox-backup-manager`)
+* A client CLI tool (`proxmox-backup-client`) to access the server easily from
+  any `Linux amd64` environment
+
+Aside from the web interface, everything is written in the Rust programming
+language.
+
+"The Rust programming language helps you write faster, more reliable software.
+High-level ergonomics and low-level control are often at odds in programming
+language design; Rust challenges that conflict. Through balancing powerful
+technical capacity and a great developer experience, Rust gives you the option
+to control low-level details (such as memory usage) without all the hassle
+traditionally associated with such control."
+
+-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
+
 .. todo:: further explain the software stack

 Getting Help
 ------------

 Community Support Forum
 ~~~~~~~~~~~~~~~~~~~~~~~

 We always encourage our users to discuss and share their knowledge using the
 `Proxmox Community Forum`_. The forum is moderated by the Proxmox support team.
 The large user base is spread out all over the world. Needless to say that such
 a large forum is a great place to get information.

 Mailing Lists
 ~~~~~~~~~~~~~

+Proxmox Backup Server is fully open-source and contributions are welcome! Here
+is the primary communication channel for developers:
+
+:Mailing list for developers: `PBS Development List`_

 Bug Tracker
 ~~~~~~~~~~~

+Proxmox runs a public bug tracker at `<https://bugzilla.proxmox.com>`_. If an
+issue appears, file your report there. An issue can be a bug as well as a
+request for a new feature or enhancement. The bug tracker helps to keep track
+of the issue and will send a notification once it has been solved.

 License
 -------
docs/local-zfs.rst (new file, 403 lines)
@@ -0,0 +1,403 @@
.. _chapter-zfs:

ZFS on Linux
------------

ZFS is a combined file system and logical volume manager designed by
Sun Microsystems. There is no need to manually compile ZFS modules - all
packages are included.

By using ZFS, it's possible to achieve maximum enterprise features even with
low-budget hardware, as well as high-performance systems, by leveraging SSD
caching or SSD-only setups. ZFS can replace expensive hardware RAID cards
with moderate CPU and memory load, combined with easy management.

General ZFS advantages:

* Easy configuration and management with GUI and CLI
* Reliable
* Protection against data corruption
* Data compression on file system level
* Snapshots
* Copy-on-write clone
* Various raid levels: RAID0, RAID1, RAID10, RAIDZ-1, RAIDZ-2 and RAIDZ-3
* Can use SSD for cache
* Self healing
* Continuous integrity checking
* Designed for high storage capacities
* Asynchronous replication over network
* Open Source
* Encryption

Hardware
~~~~~~~~

ZFS depends heavily on memory, so you need at least 8GB to start. In
practice, use as much as you can get for your hardware/budget. To prevent
data corruption, we recommend the use of high-quality ECC RAM.

If you use a dedicated cache and/or log disk, you should use an
enterprise-class SSD (e.g. Intel SSD DC S3700 Series). This can
increase the overall performance significantly.

.. IMPORTANT:: Do not use ZFS on top of a hardware controller which has its
   own cache management. ZFS needs to communicate directly with the disks. An
   HBA adapter, or something like an LSI controller flashed in ``IT`` mode, is
   the way to go.


ZFS Administration
~~~~~~~~~~~~~~~~~~

This section gives you some usage examples for common tasks. ZFS
itself is really powerful and provides many options. The main commands
to manage ZFS are `zfs` and `zpool`. Both commands come with great
manual pages, which can be read with:

.. code-block:: console

  # man zpool
  # man zfs

Create a new zpool
^^^^^^^^^^^^^^^^^^

To create a new pool, at least one disk is needed. The `ashift` should match
the sector size of the underlying disk (2 to the power of `ashift` bytes), or
be larger.

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device>
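
If you are unsure which `ashift` to pick, you can check the sector sizes a
disk reports before creating the pool. This is a minimal sketch; ``/dev/sdX``
is a placeholder for your actual device. A physical sector size of 4096 bytes
suggests ``ashift=12``, since 2^12 = 4096.

.. code-block:: console

  # lsblk -o NAME,PHY-SEC,LOG-SEC /dev/sdX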

Create a new pool with RAID-0
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 1 disk

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device1> <device2>

Create a new pool with RAID-1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 2 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2>

Create a new pool with RAID-10
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 4 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> mirror <device3> <device4>

Create a new pool with RAIDZ-1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 3 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> raidz1 <device1> <device2> <device3>

Create a new pool with RAIDZ-2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Minimum 4 disks

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> raidz2 <device1> <device2> <device3> <device4>

Create a new pool with cache (L2ARC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to use a dedicated cache drive partition to increase
the performance (use an SSD).

For `<device>`, it is possible to use multiple devices, as shown in
"Create a new pool with RAID*".

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> cache <cache_device>

Create a new pool with log (ZIL)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to use a dedicated log drive partition to increase
the performance (use an SSD).

For `<device>`, it is possible to use multiple devices, as shown in
"Create a new pool with RAID*".

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> <device> log <log_device>

Add cache and log to an existing pool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you have a pool without cache and log, you can add them later. First,
partition the SSD into two partitions with `parted` or `gdisk`.

.. important:: Always use GPT partition tables.

The maximum size of a log device should be about half the size of
physical memory, so this is usually quite small. The rest of the SSD
can be used as cache. A sketch of the partitioning step is shown below.

.. code-block:: console

  # zpool add -f <pool> log <device-part1> cache <device-part2>
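
As a sketch of the partitioning step mentioned above, assuming a host with
16 GiB of RAM (so a log partition of roughly 8 GiB) and the SSD at the
placeholder device ``/dev/sdX``:

.. code-block:: console

  # sgdisk -n 1:0:+8G /dev/sdX   # partition 1: log (ZIL)
  # sgdisk -n 2:0:0 /dev/sdX     # partition 2: cache, uses the remaining space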

Changing a failed device
^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: console

  # zpool replace -f <pool> <old device> <new device>


Changing a failed bootable device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Depending on how Proxmox Backup was installed, it is either using `grub` or
`systemd-boot` as the bootloader.

The first steps of copying the partition table, reissuing GUIDs and replacing
the ZFS partition are the same. To make the system bootable from the new disk,
different steps are needed which depend on the bootloader in use.

.. code-block:: console

  # sgdisk <healthy bootable device> -R <new device>
  # sgdisk -G <new device>
  # zpool replace -f <pool> <old zfs partition> <new zfs partition>

.. NOTE:: Use the `zpool status -v` command to monitor how far the resilvering
   process of the new disk has progressed.

With `systemd-boot`:

.. code-block:: console

  # pve-efiboot-tool format <new disk's ESP>
  # pve-efiboot-tool init <new disk's ESP>

.. NOTE:: `ESP` stands for EFI System Partition, which is set up as partition
   #2 on bootable disks set up by the {pve} installer since version 5.4. For
   details, see
   xref:sysboot_systemd_boot_setup[Setting up a new partition for use as synced ESP].

With `grub`:

Usually `grub.cfg` is located in `/boot/grub/grub.cfg`.

.. code-block:: console

  # grub-install <new disk>
  # grub-mkconfig -o /path/to/grub.cfg


Activate E-Mail Notification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

ZFS comes with an event daemon, which monitors events generated by the
ZFS kernel module. The daemon can also send emails on ZFS events like
pool errors. Newer ZFS packages ship the daemon in a separate package,
and you can install it using `apt-get`:

.. code-block:: console

  # apt-get install zfs-zed

To activate the daemon, it is necessary to edit `/etc/zfs/zed.d/zed.rc` with
your favourite editor, and uncomment the `ZED_EMAIL_ADDR` setting:

.. code-block:: console

  ZED_EMAIL_ADDR="root"

Please note that Proxmox Backup forwards mails sent to `root` to the email
address configured for the root user.

.. IMPORTANT:: The only setting that is required is `ZED_EMAIL_ADDR`. All
   other settings are optional.

Limit ZFS Memory Usage
^^^^^^^^^^^^^^^^^^^^^^

It is good to use at most 50 percent (which is the default) of the system
memory for the ZFS ARC, to prevent performance degradation of the host. Use
your preferred editor to change the configuration in
`/etc/modprobe.d/zfs.conf` and insert:

.. code-block:: console

  options zfs zfs_arc_max=8589934592

This example setting limits the usage to 8 GiB.
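
The value is given in bytes. As a worked example, 8 GiB is 8 * 1024^3 =
8589934592 bytes; the value for any other limit can be calculated the same
way, for instance for 4 GiB:

.. code-block:: console

  # echo $((4 * 1024 * 1024 * 1024))
  4294967296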

.. IMPORTANT:: If your root file system is ZFS, you must update your initramfs
   every time this value changes:

.. code-block:: console

  # update-initramfs -u


SWAP on ZFS
^^^^^^^^^^^

Swap-space created on a zvol may cause problems, such as blocking the
server or generating a high IO load, often seen when starting a backup
to an external storage.

We strongly recommend using enough memory, so that you normally do not
run into low-memory situations. Should you need or want to add swap, it is
preferred to create a partition on a physical disk and use it as a swap device.
You can leave some space free for this purpose in the advanced options of the
installer. Additionally, you can lower the `swappiness` value.
A good value for servers is 10:

.. code-block:: console

  # sysctl -w vm.swappiness=10

To make the swappiness persistent, open `/etc/sysctl.conf` with
an editor of your choice and add the following line:

.. code-block:: console

  vm.swappiness = 10

.. table:: Linux kernel `swappiness` parameter values
  :widths: auto

  ==================== ===============================================================
  Value                Strategy
  ==================== ===============================================================
  vm.swappiness = 0    The kernel will swap only to avoid an 'out of memory' condition
  vm.swappiness = 1    Minimum amount of swapping without disabling it entirely
  vm.swappiness = 10   Sometimes recommended to improve performance when sufficient memory exists in a system
  vm.swappiness = 60   The default value
  vm.swappiness = 100  The kernel will swap aggressively
  ==================== ===============================================================
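
To verify which value is currently in effect, query the parameter with
`sysctl`; the output shown assumes the recommended server value from above:

.. code-block:: console

  # sysctl vm.swappiness
  vm.swappiness = 10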

ZFS Compression
^^^^^^^^^^^^^^^

To activate compression:

.. code-block:: console

  # zfs set compression=lz4 <pool>

We recommend using the `lz4` algorithm, because it adds very little CPU
overhead. Other algorithms, such as `lzjb` and `gzip-N` (where `N` is an
integer from 1-9 representing the compression ratio, where 1 is fastest and 9
compresses best), are also available. Depending on the algorithm and how
compressible the data is, having compression enabled can even increase I/O
performance.

You can disable compression at any time with:

.. code-block:: console

  # zfs set compression=off <dataset>

Only new blocks will be affected by this change.
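
To see how effective compression is on data that has already been written,
you can query the `compressratio` property:

.. code-block:: console

  # zfs get compression,compressratio <pool>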

ZFS Special Device
^^^^^^^^^^^^^^^^^^

Since version 0.8.0, ZFS supports `special` devices. A `special` device in a
pool is used to store metadata, deduplication tables, and optionally small
file blocks.

A `special` device can improve the speed of a pool consisting of slow spinning
hard disks with a lot of metadata changes. For example, workloads that involve
creating, updating or deleting a large number of files will benefit from the
presence of a `special` device. ZFS datasets can also be configured to store
whole small files on the `special` device, which can further improve the
performance. Use fast SSDs for the `special` device.

.. IMPORTANT:: The redundancy of the `special` device should match that of the
   pool, since the `special` device is a point of failure for the whole pool.

.. WARNING:: Adding a `special` device to a pool cannot be undone!

Create a pool with `special` device and RAID-1:

.. code-block:: console

  # zpool create -f -o ashift=12 <pool> mirror <device1> <device2> special mirror <device3> <device4>

Adding a `special` device to an existing pool with RAID-1:

.. code-block:: console

  # zpool add <pool> special mirror <device1> <device2>

ZFS datasets expose the `special_small_blocks=<size>` property. `size` can be
`0` to disable storing small file blocks on the `special` device, or a power of
two in the range between `512B` and `128K`. After setting the property, new
file blocks smaller than `size` will be allocated on the `special` device.

.. IMPORTANT:: If the value for `special_small_blocks` is greater than or equal
   to the `recordsize` (default `128K`) of the dataset, *all* data will be
   written to the `special` device, so be careful!

Setting the `special_small_blocks` property on a pool will change the default
value of that property for all child ZFS datasets (for example, all containers
in the pool will opt in for small file blocks).

Opt in for all files smaller than 4K, pool-wide:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>

Opt in for small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=4K <pool>/<filesystem>

Opt out from small file blocks for a single dataset:

.. code-block:: console

  # zfs set special_small_blocks=0 <pool>/<filesystem>
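
To check which value is currently in effect for a dataset before changing it:

.. code-block:: console

  # zfs get special_small_blocks <pool>/<filesystem>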

Troubleshooting
^^^^^^^^^^^^^^^

Corrupted cachefile

Sometimes the ZFS cachefile can become corrupted, and `zfs-import-cache.service`
then does not import the pools that are not present in the cachefile. In that
case, some volumes may not be mounted during boot until mounted manually later.

For each pool, run:

.. code-block:: console

  # zpool set cachefile=/etc/zfs/zpool.cache POOLNAME

and afterwards update the `initramfs` by running:

.. code-block:: console

  # update-initramfs -u -k all

and finally reboot your node.

Another workaround to this problem is enabling the `zfs-import-scan.service`,
which searches for and imports pools via device scanning (usually slower).
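
A minimal sketch of that workaround; it assumes the unit name shipped by
current ZFS on Linux packages:

.. code-block:: console

  # systemctl enable zfs-import-scan.service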
@@ -3,100 +3,149 @@
 Debian Package Repositories
 ---------------------------

-All Debian based systems use APT_ as package
-management tool. The list of repositories is defined in
-``/etc/apt/sources.list`` and ``.list`` files found in the
-``/etc/apt/sources.d/`` directory. Updates can be installed directly with
-the ``apt`` command line tool, or via the GUI.
+All Debian based systems use APT_ as a package management tool. The lists of
+repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
+in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
+with the ``apt`` command line tool, or via the GUI.

-APT_ ``sources.list`` files list one package repository per line, with
-the most preferred source listed first. Empty lines are ignored and a
-``#`` character anywhere on a line marks the remainder of that line as a
-comment. The information available from the configured sources is
-acquired by ``apt update``.
+APT_ ``sources.list`` files list one package repository per line, with the most
+preferred source listed first. Empty lines are ignored and a ``#`` character
+anywhere on a line marks the remainder of that line as a comment. The
+information available from the configured sources is acquired by ``apt
+update``.

 .. code-block:: sources.list
   :caption: File: ``/etc/apt/sources.list``

   deb http://ftp.debian.org/debian buster main contrib
   deb http://ftp.debian.org/debian buster-updates main contrib

   # security updates
   deb http://security.debian.org/debian-security buster/updates main contrib


 .. FIXME for 7.0: change security update suite to bullseye-security

-In addition, Proxmox provides three different package repositories for
-the backup server binaries.
+In addition, you need a package repository from Proxmox to get Proxmox Backup updates.

-`Proxmox Backup`_ Enterprise Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+During the Proxmox Backup beta phase, only one repository (pbstest) will be
+available. Once released, an Enterprise repository for production use and a
+no-subscription repository will be provided.

-This is the default, stable, and recommended repository. It is available for
-all `Proxmox Backup`_ subscription users. It contains the most stable packages,
-and is suitable for production use. The ``pbs-enterprise`` repository is
-enabled by default:
+SecureApt
+~~~~~~~~~

-.. code-block:: sources.list
-  :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+The `Release` files in the repositories are signed with GnuPG. APT is using
+these signatures to verify that all packages are from a trusted source.

-  deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
+If you install Proxmox Backup Server from an official ISO image, the
+verification key is already installed.
+
+If you install Proxmox Backup Server on top of Debian, download and install the
+key with the following commands:
+
+.. code-block:: console
+
+  # wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Verify the SHA512 checksum afterwards with:
+
+.. code-block:: console
+
+  # sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+The output should be:
+
+.. code-block:: console
+
+  acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+and the md5sum:
+
+.. code-block:: console
+
+  # md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
+
+Here, the output should be:
+
+.. code-block:: console
+
+  f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
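
With the key installed, refresh the package index so that APT picks up (and
can verify) the configured Proxmox repository:

.. code-block:: console

  # apt update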

+.. comment
+  `Proxmox Backup`_ Enterprise Repository
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  This will be the default, stable, and recommended repository. It is available for
+  all `Proxmox Backup`_ subscription users. It contains the most stable packages,
+  and is suitable for production use. The ``pbs-enterprise`` repository is
+  enabled by default:
+
+  .. note:: During the Proxmox Backup beta phase only one repository (pbstest)
+     will be available.
+
+  .. code-block:: sources.list
+    :caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
+
+    deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
+
-To never miss important security fixes, the superuser (``root@pam`` user) is
-notified via email about new packages as soon as they are available. The
-change-log and details of each package can be viewed in the GUI (if available).
+  To never miss important security fixes, the superuser (``root@pam`` user) is
+  notified via email about new packages as soon as they are available. The
+  change-log and details of each package can be viewed in the GUI (if available).

-Please note that you need a valid subscription key to access this
-repository. More information regarding subscription levels and pricing can be
-found at https://www.proxmox.com/en/proxmox-backup/pricing.
+  Please note that you need a valid subscription key to access this
+  repository. More information regarding subscription levels and pricing can be
+  found at https://www.proxmox.com/en/proxmox-backup/pricing.

-.. note:: You can disable this repository by commenting out the above
-  line using a `#` (at the start of the line). This prevents error
-  messages if you do not have a subscription key. Please configure the
-  ``pbs-no-subscription`` repository in that case.
+  .. note:: You can disable this repository by commenting out the above
+    line using a `#` (at the start of the line). This prevents error
+    messages if you do not have a subscription key. Please configure the
+    ``pbs-no-subscription`` repository in that case.

-`Proxmox Backup`_ No-Subscription Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  `Proxmox Backup`_ No-Subscription Repository
+  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-As the name suggests, you do not need a subscription key to access
-this repository. It can be used for testing and non-production
-use. It is not recommended to use it on production servers, because these
-packages are not always heavily tested and validated.
+  As the name suggests, you do not need a subscription key to access
+  this repository. It can be used for testing and non-production
+  use. It is not recommended to use it on production servers, because these
+  packages are not always heavily tested and validated.

-We recommend to configure this repository in ``/etc/apt/sources.list``.
+  We recommend to configure this repository in ``/etc/apt/sources.list``.

-.. code-block:: sources.list
-  :caption: File: ``/etc/apt/sources.list``
+  .. code-block:: sources.list
+    :caption: File: ``/etc/apt/sources.list``

-  deb http://ftp.debian.org/debian buster main contrib
-  deb http://ftp.debian.org/debian buster-updates main contrib
+    deb http://ftp.debian.org/debian buster main contrib
+    deb http://ftp.debian.org/debian buster-updates main contrib

-  # PBS pbs-no-subscription repository provided by proxmox.com,
-  # NOT recommended for production use
-  deb http://download.proxmox.com/debian/bps buster pbs-no-subscription
+    # PBS pbs-no-subscription repository provided by proxmox.com,
+    # NOT recommended for production use
+    deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription

-  # security updates
-  deb http://security.debian.org/debian-security buster/updates main contrib
+    # security updates
+    deb http://security.debian.org/debian-security buster/updates main contrib


-`Proxmox Backup`_ Test Repository
+`Proxmox Backup`_ Beta Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Finally, there is a repository called ``pbstest``. This one contains the
-latest packages and is heavily used by developers to test new
+During the public beta, there is a repository called ``pbstest``. This one
+contains the latest packages and is heavily used by developers to test new
 features.

-.. warning:: the ``pbstest`` repository should (as the name implies)
+.. .. warning:: the ``pbstest`` repository should (as the name implies)
   only be used to test new features or bug fixes.

-You can configure this using ``/etc/apt/sources.list`` by
-adding the following line:
+You can access this repository by adding the following line to
+``/etc/apt/sources.list``:

 .. code-block:: sources.list
   :caption: sources.list entry for ``pbstest``

-  deb http://download.proxmox.com/debian/bps buster pbstest
+  deb http://download.proxmox.com/debian/pbs buster pbstest

+If you installed Proxmox Backup Server from the official beta ISO, you should
+have this repository already configured in
+``/etc/apt/sources.list.d/pbstest-beta.list``
@@ -24,7 +24,7 @@ This daemon is normally started and managed as ``systemd`` service::

   systemctl status proxmox-backup-proxy

-For debugging, you can start the daemon in forground using::
+For debugging, you can start the daemon in foreground using::

   proxmox-backup-proxy
@@ -9,7 +9,7 @@ which caters to a similar use-case.
 The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
 Backup Server, for example, efficient storage of hardlinks.
 The format is designed to reduce storage space needed on the server by achieving
-a high level of de-duplication.
+a high level of deduplication.

 Creating an Archive
 ^^^^^^^^^^^^^^^^^^^

@@ -18,7 +18,7 @@ Run the following command to create an archive of a folder named ``source``:

 .. code-block:: console

-    # pxar create archive.pxar source
+    # pxar create archive.pxar /path/to/source

 This will create a new archive called ``archive.pxar`` with the contents of the
 ``source`` folder.

@@ -29,45 +29,44 @@ This will create a new archive called ``archive.pxar`` with the contents of the

 By default, ``pxar`` will skip certain mountpoints and will not follow device
 boundaries. This design decision is based on the primary use case of creating
-archives for backups. It is sensible to not back up the contents of certain
+archives for backups. It makes sense to not back up the contents of certain
 temporary or system specific files.
 To alter this behavior and follow device boundaries, use the
 ``--all-file-systems`` flag.

 It is possible to exclude certain files and/or folders from the archive by
-passing glob match patterns as additional parameters. Whenever a file is matched
-by one of the patterns, you will get a warning stating that this file is skipped
-and therefore not included in the archive.
+passing the ``--exclude`` parameter with ``gitignore``\-style match patterns.

 For example, you can exclude all files ending in ``.txt`` from the archive
 by running:

 .. code-block:: console

-    # pxar create archive.pxar source '**/*.txt'
+    # pxar create archive.pxar /path/to/source --exclude '**/*.txt'

 Be aware that the shell itself will try to expand all of the glob patterns before
 invoking ``pxar``.
 In order to avoid this, all globs have to be quoted correctly.

-It is possible to pass a list of match patterns to fulfill more complex
-file exclusion/inclusion behavior, although it is recommended to use the
+It is possible to pass the ``--exclude`` parameter multiple times, in order to
+match more than one pattern. This allows you to use more complex
+file exclusion/inclusion behavior. However, it is recommended to use
 ``.pxarexclude`` files instead for such cases.
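
As an illustration, a hypothetical ``.pxarexclude`` file placed at the root of
the source directory could hold one ``gitignore``-style pattern per line, with
``!`` negating a previous match; the file location and patterns here are
purely illustrative:

.. code-block:: console
  :caption: File: ``source/.pxarexclude``

  **/*.txt
  !/folder/file.txt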

 For example you might want to exclude all ``.txt`` files except for a specific
 one from the archive. This is achieved via the negated match pattern, prefixed
 by ``!``.
-All the glob pattern are relative to the ``source`` directory.
+All the glob patterns are relative to the ``source`` directory.

 .. code-block:: console

-    # pxar create archive.pxar source '**/*.txt' '!/folder/file.txt'
+    # pxar create archive.pxar /path/to/source --exclude '**/*.txt' --exclude '!/folder/file.txt'

-.. NOTE:: The order of the glob match patterns matters as later ones win over
+.. NOTE:: The order of the glob match patterns matters as later ones override
    previous ones. Permutations of the same patterns lead to different results.

-``pxar`` will store the list of glob match patterns passed as parameters via the
-command line in a file called ``.pxarexclude-cli`` and stores it at the root of
+``pxar`` will store the list of glob match patterns passed as parameters via the
+command line, in a file called ``.pxarexclude-cli`` at the root of
 the archive.
 If a file with this name is already present in the source folder during archive
 creation, this file is not included in the archive and the file containing the

@@ -86,23 +85,23 @@ The behavior is the same as described in :ref:`creating-backups`.
 Extracting an Archive
 ^^^^^^^^^^^^^^^^^^^^^

-An existing archive ``archive.pxar`` is extracted to a ``target`` directory
+An existing archive, ``archive.pxar``, is extracted to a ``target`` directory
 with the following command:

 .. code-block:: console

-    # pxar extract archive.pxar --target target
+    # pxar extract archive.pxar /path/to/target

 If no target is provided, the content of the archive is extracted to the current
 working directory.

-In order to restore only parts of an archive, single files and/or folders,
+In order to restore only parts of an archive, single files, and/or folders,
 it is possible to pass the corresponding glob match patterns as additional
-parameters or use the patterns stored in a file:
+parameters or to use the patterns stored in a file:

 .. code-block:: console

-    # pxar extract etc.pxar '**/*.conf' --target /restore/target/etc
+    # pxar extract etc.pxar /restore/target/etc --pattern '**/*.conf'

 The above example restores all ``.conf`` files encountered in any of the
 sub-folders in the archive ``etc.pxar`` to the target ``/restore/target/etc``.
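
Before extracting, it can help to inspect what an archive contains. Assuming
the ``pxar`` tool's ``list`` subcommand, which prints the entries stored in an
archive, a minimal sketch would be:

.. code-block:: console

  # pxar list archive.pxar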
@@ -1,5 +1,5 @@
 Host System Administration
---------------------------
+==========================

 `Proxmox Backup`_ is based on the famous Debian_ Linux
 distribution. That means that you have access to the whole world of

@@ -23,8 +23,4 @@ either explain things which are different on `Proxmox Backup`_, or
 tasks which are commonly used on `Proxmox Backup`_. For other topics,
 please refer to the standard Debian documentation.

-ZFS
-~~~
-
-.. todo:: Add local ZFS admin guide (local.zfs.adoc)
+.. include:: local-zfs.rst
docs/todos.rst (new file, 6 lines)
@@ -0,0 +1,6 @@

Documentation Todo List
=======================

This is an auto-generated list of the todo references in the documentation.

.. todolist::
@@ -7,7 +7,7 @@ DYNAMIC_UNITS := \
   proxmox-backup.service \
   proxmox-backup-proxy.service

-all: $(UNITS) $(DYNAMIC_UNITS)
+all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list

 clean:
 	rm -f $(DYNAMIC_UNITS)
etc/pbstest-beta.list (new file, 1 line)
@@ -0,0 +1 @@

deb http://download.proxmox.com/debian/pbs buster pbstest
@@ -4,6 +4,7 @@ use anyhow::{Error};

 use chrono::{DateTime, Utc};

+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};

 pub struct DummyWriter {

@@ -27,7 +28,7 @@ async fn run() -> Result<(), Error> {

     let host = "localhost";

-    let username = "root@pam";
+    let username = Userid::root_userid();

     let options = HttpClientOptions::new()
         .interactive(true)
@@ -1,13 +1,14 @@
 use anyhow::{Error};

+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::client::*;

-async fn upload_speed() -> Result<usize, Error> {
+async fn upload_speed() -> Result<f64, Error> {

     let host = "localhost";
     let datastore = "store2";

-    let username = "root@pam";
+    let username = Userid::root_userid();

     let options = HttpClientOptions::new()
         .interactive(true)

@@ -17,10 +18,10 @@ async fn upload_speed() -> Result<usize, Error> {

     let backup_time = chrono::Utc::now();

-    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false).await?;
+    let client = BackupWriter::start(client, None, datastore, "host", "speedtest", backup_time, false, true).await?;

     println!("start upload speed test");
-    let res = client.upload_speedtest().await?;
+    let res = client.upload_speedtest(true).await?;

     Ok(res)
 }
@@ -4,7 +4,6 @@ pub mod backup;
 pub mod config;
 pub mod node;
 pub mod reader;
-mod subscription;
 pub mod status;
 pub mod types;
 pub mod version;

@@ -26,7 +25,6 @@ pub const SUBDIRS: SubdirMap = &[
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
-    ("subscription", &subscription::ROUTER),
     ("version", &version::ROUTER),
 ];
@@ -2,56 +2,110 @@ use anyhow::{bail, format_err, Error};

 use serde_json::{json, Value};

-use proxmox::api::{api, RpcEnvironment, Permission, UserInformation};
+use proxmox::api::{api, RpcEnvironment, Permission};
 use proxmox::api::router::{Router, SubdirMap};
 use proxmox::{sortable, identity};
 use proxmox::{http_err, list_subdirs_api_method};

 use crate::tools;
-use crate::tools::ticket::*;
+use crate::tools::ticket::{self, Empty, Ticket};
 use crate::auth_helpers::*;
 use crate::api2::types::*;

 use crate::config::cached_user_info::CachedUserInfo;
-use crate::config::acl::PRIV_PERMISSIONS_MODIFY;
+use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};

 pub mod user;
 pub mod domain;
 pub mod acl;
 pub mod role;

-fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
+/// returns Ok(true) if a ticket has to be created
+/// and Ok(false) if not
+fn authenticate_user(
+    userid: &Userid,
+    password: &str,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<bool, Error> {
     let user_info = CachedUserInfo::new()?;

-    if !user_info.is_active_user(&username) {
+    if !user_info.is_active_user(&userid) {
         bail!("user account disabled or expired.");
     }

-    let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
-
     if password.starts_with("PBS:") {
-        if let Ok((_age, Some(ticket_username))) = tools::ticket::verify_rsa_ticket(public_auth_key(), "PBS", password, None, -300, ticket_lifetime) {
-            if ticket_username == username {
-                return Ok(());
-            } else {
-                bail!("ticket login failed - wrong username");
-            }
+        if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
+            .and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
+        {
+            if *userid == ticket_userid {
+                return Ok(true);
+            }
+            bail!("ticket login failed - wrong userid");
+        }
+    } else if password.starts_with("PBSTERM:") {
+        if path.is_none() || privs.is_none() || port.is_none() {
+            bail!("cannot check terminal ticket without path, priv and port");
+        }
+
+        let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
+        let privilege_name = privs
+            .ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
+        let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
+
+        if let Ok(Empty) = Ticket::parse(password)
+            .and_then(|ticket| ticket.verify(
+                public_auth_key(),
+                ticket::TERM_PREFIX,
+                Some(&ticket::term_aad(userid, &path, port)),
+            ))
+        {
+            for (name, privilege) in PRIVILEGES {
+                if *name == privilege_name {
+                    let mut path_vec = Vec::new();
+                    for part in path.split('/') {
+                        if part != "" {
+                            path_vec.push(part);
+                        }
+                    }
+
+                    user_info.check_privs(userid, &path_vec, *privilege, false)?;
+                    return Ok(false);
+                }
+            }
+
+            bail!("No such privilege");
         }
     }

-    crate::auth::authenticate_user(username, password)
+    let _ = crate::auth::authenticate_user(userid, password)?;
+    Ok(true)
 }

 #[api(
     input: {
         properties: {
             username: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 schema: PASSWORD_SCHEMA,
             },
+            path: {
+                type: String,
+                description: "Path for verifying terminal tickets.",
+                optional: true,
+            },
+            privs: {
+                type: String,
+                description: "Privilege for verifying terminal tickets.",
+                optional: true,
+            },
+            port: {
+                type: Integer,
+                description: "Port for verifying terminal tickets.",
+                optional: true,
+            },
         },
     },
     returns: {

@@ -78,11 +132,16 @@ fn authenticate_user(username: &str, password: &str) -> Result<(), Error> {
 /// Create or verify authentication ticket.
 ///
 /// Returns: An authentication ticket with additional infos.
-fn create_ticket(username: String, password: String) -> Result<Value, Error> {
-    match authenticate_user(&username, &password) {
-        Ok(_) => {
-
-            let ticket = assemble_rsa_ticket( private_auth_key(), "PBS", Some(&username), None)?;
+fn create_ticket(
+    username: Userid,
+    password: String,
+    path: Option<String>,
+    privs: Option<String>,
+    port: Option<u16>,
+) -> Result<Value, Error> {
+    match authenticate_user(&username, &password, path, privs, port) {
+        Ok(true) => {
+            let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;

             let token = assemble_csrf_prevention_token(csrf_secret(), &username);

@@ -94,10 +153,13 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
                 "CSRFPreventionToken": token,
             }))
         }
+        Ok(false) => Ok(json!({
+            "username": username,
+        })),
         Err(err) => {
             let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
             log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
-            Err(http_err!(UNAUTHORIZED, "permission check failed.".into()))
+            Err(http_err!(UNAUTHORIZED, "permission check failed."))
         }
     }
 }

@@ -106,7 +168,7 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             password: {
                 schema: PASSWORD_SCHEMA,

@@ -124,13 +186,15 @@ fn create_ticket(username: String, password: String) -> Result<Value, Error> {
 /// Each user is allowed to change his own password. Superuser
 /// can change all passwords.
 fn change_password(
-    userid: String,
+    userid: Userid,
     password: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let current_user = rpcenv.get_user()
-        .ok_or_else(|| format_err!("unknown user"))?;
+    let current_user: Userid = rpcenv
+        .get_user()
+        .ok_or_else(|| format_err!("unknown user"))?
+        .parse()?;

     let mut allowed = userid == current_user;

@@ -146,9 +210,8 @@ fn change_password(
         bail!("you are not authorized to change the password.");
     }

-    let (username, realm) = crate::auth::parse_userid(&userid)?;
-    let authenticator = crate::auth::lookup_authenticator(&realm)?;
-    authenticator.store_password(&username, &password)?;
+    let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
+    authenticator.store_password(userid.name(), &password)?;

     Ok(Value::Null)
 }
@@ -2,6 +2,7 @@ use anyhow::{bail, Error};
 use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::config::acl;

@@ -141,7 +142,7 @@ pub fn read_acl(
     },
     userid: {
         optional: true,
-        schema: PROXMOX_USER_ID_SCHEMA,
+        type: Userid,
     },
     group: {
         optional: true,

@@ -167,14 +168,14 @@ pub fn update_acl(
     path: String,
     role: String,
     propagate: Option<bool>,
-    userid: Option<String>,
+    userid: Option<Userid>,
     group: Option<String>,
     delete: Option<bool>,
     digest: Option<String>,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut tree, expected_digest) = acl::config()?;

@@ -192,7 +193,7 @@ pub fn update_acl(
     } else if let Some(ref userid) = userid {
         if !delete { // Note: we allow to delete non-existent users
             let user_cfg = crate::config::user::cached_config()?;
-            if user_cfg.sections.get(userid).is_none() {
+            if user_cfg.sections.get(&userid.to_string()).is_none() {
                 bail!("no such user.");
             }
         }
@@ -3,6 +3,7 @@ use serde_json::Value;

 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox::api::schema::{Schema, StringSchema};
+use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::config::user;

@@ -48,7 +49,7 @@ pub fn list_users(
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             comment: {
                 schema: SINGLE_LINE_COMMENT_SCHEMA,

@@ -87,25 +88,24 @@ pub fn list_users(
 /// Create new user.
 pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let user: user::User = serde_json::from_value(param)?;

     let (mut config, _digest) = user::config()?;

-    if let Some(_) = config.sections.get(&user.userid) {
+    if let Some(_) = config.sections.get(user.userid.as_str()) {
         bail!("user '{}' already exists.", user.userid);
     }

-    let (username, realm) = crate::auth::parse_userid(&user.userid)?;
-    let authenticator = crate::auth::lookup_authenticator(&realm)?;
+    let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;

-    config.set_data(&user.userid, "user", &user)?;
+    config.set_data(user.userid.as_str(), "user", &user)?;

     user::save_config(&config)?;

     if let Some(password) = password {
-        authenticator.store_password(&username, &password)?;
+        authenticator.store_password(user.userid.name(), &password)?;
     }

     Ok(())

@@ -115,7 +115,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
         },
     },

@@ -128,9 +128,9 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
 )]
 /// Read user configuration data.
-pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
+pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<user::User, Error> {
     let (config, digest) = user::config()?;
-    let user = config.lookup("user", &userid)?;
+    let user = config.lookup("user", userid.as_str())?;
     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
     Ok(user)
 }

@@ -140,7 +140,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             comment: {
                 optional: true,

@@ -182,7 +182,7 @@ pub fn read_user(userid: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
 )]
 /// Update user configuration.
 pub fn update_user(
-    userid: String,
+    userid: Userid,
     comment: Option<String>,
     enable: Option<bool>,
     expire: Option<i64>,

@@ -193,7 +193,7 @@ pub fn update_user(
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = user::config()?;

@@ -202,7 +202,7 @@ pub fn update_user(
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }

-    let mut data: user::User = config.lookup("user", &userid)?;
+    let mut data: user::User = config.lookup("user", userid.as_str())?;

     if let Some(comment) = comment {
         let comment = comment.trim().to_string();

@@ -222,9 +222,8 @@ pub fn update_user(
     }

     if let Some(password) = password {
-        let (username, realm) = crate::auth::parse_userid(&userid)?;
-        let authenticator = crate::auth::lookup_authenticator(&realm)?;
-        authenticator.store_password(&username, &password)?;
+        let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
+        authenticator.store_password(userid.name(), &password)?;
     }

     if let Some(firstname) = firstname {

@@ -238,7 +237,7 @@ pub fn update_user(
         data.email = if email.is_empty() { None } else { Some(email) };
     }

-    config.set_data(&userid, "user", &data)?;
+    config.set_data(userid.as_str(), "user", &data)?;

     user::save_config(&config)?;

@@ -250,7 +249,7 @@ pub fn update_user(
     input: {
         properties: {
             userid: {
-                schema: PROXMOX_USER_ID_SCHEMA,
+                type: Userid,
             },
             digest: {
                 optional: true,

@@ -263,9 +262,9 @@ pub fn update_user(
     },
 )]
 /// Remove a user from the configuration file.
-pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error> {
+pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = user::config()?;

@@ -274,8 +273,8 @@ pub fn delete_user(userid: String, digest: Option<String>) -> Result<(), Error>
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }

-    match config.sections.get(&userid) {
-        Some(_) => { config.sections.remove(&userid); },
+    match config.sections.get(userid.as_str()) {
+        Some(_) => { config.sections.remove(userid.as_str()); },
         None => bail!("user '{}' does not exist.", userid),
     }
@@ -1,6 +1,7 @@
 use std::collections::{HashSet, HashMap};
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
+use std::sync::{Arc, Mutex};

 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -10,7 +11,8 @@ use serde_json::{json, Value};

 use proxmox::api::{
     api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
-    RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
+    RpcEnvironment, RpcEnvironmentType, Permission
+};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
 use proxmox::tools::fs::{replace_file, CreateOptions};
@@ -36,7 +38,11 @@ use crate::config::acl::{
     PRIV_DATASTORE_BACKUP,
 };

-fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
+fn check_backup_owner(
+    store: &DataStore,
+    group: &BackupGroup,
+    userid: &Userid,
+) -> Result<(), Error> {
     let owner = store.get_owner(group)?;
     if &owner != userid {
         bail!("backup owner check failed ({} != {})", userid, owner);
@@ -44,7 +50,10 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
     Ok(())
 }

-fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
+fn read_backup_index(
+    store: &DataStore,
+    backup_dir: &BackupDir,
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

     let (manifest, index_size) = store.load_manifest(backup_dir)?;

@@ -52,25 +61,29 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<Ba
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
-            encrypted: item.encrypted,
+            crypt_mode: Some(item.crypt_mode),
             size: Some(item.size),
         });
     }

     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
-        encrypted: Some(false),
+        crypt_mode: match manifest.signature {
+            Some(_) => Some(CryptMode::SignOnly),
+            None => Some(CryptMode::None),
+        },
         size: Some(index_size),
     });

-    Ok(result)
+    Ok((manifest, result))
 }

 fn get_all_snapshot_files(
     store: &DataStore,
     info: &BackupInfo,
-) -> Result<Vec<BackupContent>, Error> {
-    let mut files = read_backup_index(&store, &info.backup_dir)?;
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
+
+    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;

     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
         acc.insert(item.filename.clone());
@@ -79,10 +92,14 @@ fn get_all_snapshot_files(

     for file in &info.files {
         if file_set.contains(file) { continue; }
-        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+        files.push(BackupContent {
+            filename: file.to_string(),
+            size: None,
+            crypt_mode: None,
+        });
     }

-    Ok(files)
+    Ok((manifest, files))
 }

 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
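A brief aside on the `encrypted: Option<bool>` to `crypt_mode: Option<CryptMode>` switch above: a three-state enum can distinguish a signed-only manifest from an encrypted file, which a boolean cannot. A minimal sketch (the enum shape mirrors the diff; the surrounding types are simplified stand-ins):

#[derive(Debug, Clone, Copy, PartialEq)]
enum CryptMode {
    None,
    Encrypt,
    SignOnly,
}

struct FileEntry {
    filename: String,
    crypt_mode: Option<CryptMode>, // None = unknown (file listed but no manifest entry)
}

fn can_decode(entry: &FileEntry) -> bool {
    // only fully encrypted files cannot be decoded server side;
    // signed-only and plain files are fine
    entry.crypt_mode != Some(CryptMode::Encrypt)
}

fn main() {
    let manifest = FileEntry {
        filename: "index.json.blob".into(),
        crypt_mode: Some(CryptMode::SignOnly),
    };
    let data = FileEntry {
        filename: "catalog.pcat1.didx".into(),
        crypt_mode: Some(CryptMode::Encrypt),
    };
    assert!(can_decode(&manifest));
    assert!(!can_decode(&data));
}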
@@ -126,9 +143,9 @@ fn list_groups(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -149,7 +166,7 @@ fn list_groups(
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
         let owner = datastore.get_owner(group)?;
         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

         let result_item = GroupListItem {
@@ -207,20 +224,22 @@ pub fn list_snapshot_files(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

-    get_all_snapshot_files(&datastore, &info)
+    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;
+
+    Ok(files)
 }

 #[api(
@@ -257,18 +276,18 @@ fn delete_snapshot(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

-    datastore.remove_backup_dir(&snapshot)?;
+    datastore.remove_backup_dir(&snapshot, false)?;

     Ok(Value::Null)
 }
@@ -313,9 +332,9 @@ pub fn list_snapshots (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SnapshotListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -338,19 +357,46 @@ pub fn list_snapshots (
         let owner = datastore.get_owner(group)?;

         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

         let mut size = None;

-        let files = match get_all_snapshot_files(&datastore, &info) {
-            Ok(files) => {
+        let (comment, verification, files) = match get_all_snapshot_files(&datastore, &info) {
+            Ok((manifest, files)) => {
                 size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
-                files
+                // extract the first line from notes
+                let comment: Option<String> = manifest.unprotected["notes"]
+                    .as_str()
+                    .and_then(|notes| notes.lines().next())
+                    .map(String::from);
+
+                let verify = manifest.unprotected["verify_state"].clone();
+                let verify: Option<SnapshotVerifyState> = match serde_json::from_value(verify) {
+                    Ok(verify) => verify,
+                    Err(err) => {
+                        eprintln!("error parsing verification state : '{}'", err);
+                        None
+                    }
+                };
+
+                (comment, verify, files)
             },
             Err(err) => {
                 eprintln!("error during snapshot file listing: '{}'", err);
-                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+                (
+                    None,
+                    None,
+                    info
+                        .files
+                        .iter()
+                        .map(|x| BackupContent {
+                            filename: x.to_string(),
+                            size: None,
+                            crypt_mode: None,
+                        })
+                        .collect()
+                )
             },
         };

@@ -358,6 +404,8 @@ pub fn list_snapshots (
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
+            comment,
+            verification,
             files,
             size,
             owner: Some(owner),
@@ -442,7 +490,7 @@ pub fn verify(
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
-            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
             backup_dir = Some(dir);
         }
         (Some(backup_type), Some(backup_id), None) => {
@@ -453,27 +501,50 @@ pub fn verify(
         (None, None, None) => {
             worker_id = store.clone();
         }
-        _ => bail!("parameters do not spefify a backup group or snapshot"),
+        _ => bail!("parameters do not specify a backup group or snapshot"),
     }

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
-        "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
-    {
-        let success = if let Some(backup_dir) = backup_dir {
-            verify_backup_dir(&datastore, &backup_dir, &worker)?
+        "verify",
+        Some(worker_id.clone()),
+        userid,
+        to_stdout,
+        move |worker| {
+            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+            let failed_dirs = if let Some(backup_dir) = backup_dir {
+                let mut res = Vec::new();
+                if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
+                    res.push(backup_dir.to_string());
+                }
+                res
             } else if let Some(backup_group) = backup_group {
-            verify_backup_group(&datastore, &backup_group, &worker)?
+                let (_count, failed_dirs) = verify_backup_group(
+                    datastore,
+                    &backup_group,
+                    verified_chunks,
+                    corrupt_chunks,
+                    None,
+                    worker.clone(),
+                )?;
+                failed_dirs
             } else {
-            verify_all_backups(&datastore, &worker)?
+                verify_all_backups(datastore, worker.clone())?
             };
-        if !success {
-            bail!("verfication failed - please check the log for details");
+            if failed_dirs.len() > 0 {
+                worker.log("Failed to verify following snapshots:");
+                for dir in failed_dirs {
+                    worker.log(format!("\t{}", dir));
+                }
+                bail!("verification failed - please check the log for details");
             }
             Ok(())
-    })?;
+        },
+    )?;

     Ok(json!(upid_str))
 }
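The reworked verify task collects the names of failing snapshots instead of stopping at the first bad one, and only bails after reporting them all. Stripped of the datastore and worker specifics, the control flow reduces to the following sketch (the `verify_one` closure is a hypothetical stand-in for the real per-snapshot verification):

fn verify_many<I>(
    snapshots: I,
    verify_one: impl Fn(&str) -> Result<bool, String>,
) -> Result<(), String>
where
    I: IntoIterator<Item = String>,
{
    let mut failed_dirs = Vec::new();
    for snap in snapshots {
        match verify_one(&snap) {
            Ok(true) => {}                       // verified fine
            Ok(false) => failed_dirs.push(snap), // verification failed, keep going
            Err(err) => return Err(err),         // hard error, abort
        }
    }
    if !failed_dirs.is_empty() {
        for dir in &failed_dirs {
            eprintln!("\t{}", dir);
        }
        return Err("verification failed - please check the log for details".into());
    }
    Ok(())
}

fn main() {
    let snaps = vec!["vm/100/2020-08-10".to_string(), "vm/101/2020-08-10".to_string()];
    // fail the second snapshot to show the aggregated report
    let result = verify_many(snaps, |s| Ok(!s.contains("101")));
    assert!(result.is_err());
}

Collecting failures first gives the operator one complete report per run instead of a restart-and-retry loop.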
@@ -523,7 +594,7 @@ macro_rules! add_common_prune_prameters {

 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     "Returns the list of snapshots and a flag indicating if there are kept or removed.",
-    PruneListItem::API_SCHEMA
+    &PruneListItem::API_SCHEMA
 ).schema();

 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
@@ -558,9 +629,9 @@ fn prune(
     let backup_type = tools::required_string_param(&param, "backup-type")?;
     let backup_id = tools::required_string_param(&param, "backup-id")?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let dry_run = param["dry-run"].as_bool().unwrap_or(false);

@@ -569,7 +640,7 @@ fn prune(
     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, &group, &username)?; }
+    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

     let prune_options = PruneOptions {
         keep_last: param["keep-last"].as_u64(),
@@ -611,7 +682,7 @@ fn prune(


     // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

     let result = try_block! {
         if keep_all {
@@ -648,7 +719,7 @@ fn prune(
             }));

             if !(dry_run || keep) {
-                datastore.remove_backup_dir(&info.backup_dir)?;
+                datastore.remove_backup_dir(&info.backup_dir, true)?;
             }
         }

@@ -693,11 +764,15 @@ fn start_garbage_collection(
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
-        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
-    {
+        "garbage_collection",
+        Some(store.clone()),
+        Userid::root_userid().clone(),
+        to_stdout,
+        move |worker| {
             worker.log(format!("starting garbage collection on store {}", store));
             datastore.garbage_collection(&worker)
-    })?;
+        },
+    )?;

     Ok(json!(upid_str))
 }
@@ -761,13 +836,13 @@ fn get_datastore_list(

     let (config, _digest) = datastore::config()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let mut list = Vec::new();

     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             let mut entry = json!({ "store": store });
@@ -812,9 +887,9 @@ fn download_file(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -822,10 +897,10 @@ fn download_file(
     let backup_id = tools::required_string_param(&param, "backup-id")?;
     let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

     println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

@@ -834,8 +909,8 @@ fn download_file(
     path.push(&file_name);

     let file = tokio::fs::File::open(&path)
-        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
-        .await?;
+        .await
+        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
         .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
@@ -885,9 +960,9 @@ fn download_file_decoded(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

@@ -895,14 +970,14 @@ fn download_file_decoded(
     let backup_id = tools::required_string_param(&param, "backup-id")?;
     let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

-    let files = read_backup_index(&datastore, &backup_dir)?;
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.encrypted == Some(true) {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", file_name);
         }
     }
@@ -919,8 +994,10 @@ fn download_file_decoded(
         "didx" => {
             let index = DynamicIndexReader::open(&path)
                 .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+            let (csum, size) = index.compute_csum();
+            manifest.verify_file(&file_name, &csum, size)?;

-            let chunk_reader = LocalChunkReader::new(datastore, None);
+            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
             let reader = AsyncIndexReader::new(index, chunk_reader);
             Body::wrap_stream(AsyncReaderStream::new(reader)
                 .map_err(move |err| {
@@ -932,7 +1009,10 @@ fn download_file_decoded(
             let index = FixedIndexReader::open(&path)
                 .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

-            let chunk_reader = LocalChunkReader::new(datastore, None);
+            let (csum, size) = index.compute_csum();
+            manifest.verify_file(&file_name, &csum, size)?;
+
+            let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
             let reader = AsyncIndexReader::new(index, chunk_reader);
             Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                 .map_err(move |err| {
@@ -942,7 +1022,9 @@ fn download_file_decoded(
         },
         "blob" => {
             let file = std::fs::File::open(&path)
-                .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))?;
+                .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
+
+            // FIXME: load full blob to verify index checksum?

             Body::wrap_stream(
                 WrappedReaderStream::new(DataBlobReader::new(file, None)?)
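Both index branches above now recompute the index checksum and compare it against the manifest before streaming anything to the client. The guard reduces to a compare-or-bail helper; here is a self-contained sketch with a toy manifest (the real code verifies SHA-256 digests produced by `compute_csum`, and the struct layout here is invented for illustration):

use std::collections::HashMap;

/// Toy manifest mapping file names to (checksum, size) pairs.
struct Manifest {
    files: HashMap<String, ([u8; 32], u64)>,
}

impl Manifest {
    fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), String> {
        let (expected_csum, expected_size) = self
            .files
            .get(name)
            .ok_or_else(|| format!("file '{}' not found in manifest", name))?;
        if expected_size != &size {
            return Err(format!("wrong size for '{}'", name));
        }
        if expected_csum != csum {
            return Err(format!("wrong checksum for '{}'", name));
        }
        Ok(())
    }
}

fn main() {
    let mut files = HashMap::new();
    files.insert("catalog.pcat1.didx".to_string(), ([0u8; 32], 1234));
    let manifest = Manifest { files };

    // a reader would compute (csum, size) from the index it just opened
    let (csum, size) = ([0u8; 32], 1234u64);
    manifest.verify_file("catalog.pcat1.didx", &csum, size).unwrap();
}

Verifying against the manifest means a tampered or bit-rotted index is rejected before a single chunk leaves the server.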
@@ -1001,10 +1083,10 @@ fn upload_backup_log(
     let backup_id = tools::required_string_param(&param, "backup-id")?;
     let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

-    let username = rpcenv.get_user().unwrap();
-    check_backup_owner(&datastore, backup_dir.group(), &username)?;
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    check_backup_owner(&datastore, backup_dir.group(), &userid)?;

     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
@@ -1025,11 +1107,10 @@ fn upload_backup_log(
         })
         .await?;

-    let blob = DataBlob::from_raw(data)?;
-    // always verify CRC at server side
-    blob.verify_crc()?;
-    let raw_data = blob.raw_data();
-    replace_file(&path, raw_data, CreateOptions::new())?;
+    // always verify blob/CRC at server side
+    let blob = DataBlob::load_from_reader(&mut &data[..])?;
+
+    replace_file(&path, blob.raw_data(), CreateOptions::new())?;

     // fixme: use correct formatter
     Ok(crate::server::formatter::json_response(Ok(Value::Null)))
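The upload path switches from `DataBlob::from_raw` plus a separate CRC check to `DataBlob::load_from_reader`, which validates the blob while parsing it. The general pattern, validating during construction so that no unvalidated value can exist, is easy to sketch with an invented two-byte-magic format (everything below is illustrative, not the real blob layout):

use std::io::Read;

/// Hypothetical blob format: 2 magic bytes followed by payload.
struct Blob {
    raw: Vec<u8>,
}

const MAGIC: [u8; 2] = [0xAB, 0xCD];

impl Blob {
    /// Parse and validate in one step - an invalid Blob can never be constructed.
    fn load_from_reader(mut reader: impl Read) -> Result<Self, String> {
        let mut raw = Vec::new();
        reader.read_to_end(&mut raw).map_err(|e| e.to_string())?;
        if raw.len() < 2 || raw[..2] != MAGIC {
            return Err("bad blob magic".into());
        }
        Ok(Blob { raw })
    }

    fn raw_data(&self) -> &[u8] {
        &self.raw
    }
}

fn main() {
    let data: Vec<u8> = vec![0xAB, 0xCD, 1, 2, 3];
    // `&mut &data[..]` turns a byte slice into an `impl Read`, as in the diff
    let blob = Blob::load_from_reader(&mut &data[..]).unwrap();
    assert_eq!(blob.raw_data().len(), 5);
}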
@@ -1074,23 +1155,35 @@ fn catalog(
 ) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

+    let file_name = CATALOG_NAME;
+
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+    for file in files {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            bail!("cannot decode '{}' - is encrypted", file_name);
+        }
+    }
+
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
-    path.push(CATALOG_NAME);
+    path.push(file_name);

     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

-    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let (csum, size) = index.compute_csum();
+    manifest.verify_file(&file_name, &csum, size)?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);

     let mut catalog_reader = CatalogReader::new(reader);
@@ -1146,7 +1239,7 @@ fn catalog(
 pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&pxar_file_download),
     &ObjectSchema::new(
-        "Download single file from pxar file of a bacup snapshot. Only works if it's not encrypted.",
+        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
@@ -1173,9 +1266,9 @@ fn pxar_file_download(
     let store = tools::required_string_param(&param, "store")?;
     let datastore = DataStore::lookup_datastore(&store)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

@@ -1183,13 +1276,10 @@ fn pxar_file_download(
     let backup_id = tools::required_string_param(&param, "backup-id")?;
     let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
-
-    let mut path = datastore.base_path();
-    path.push(backup_dir.relative_path());
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

     let mut components = base64::decode(&filepath)?;
     if components.len() > 0 && components[0] == '/' as u8 {
@@ -1197,15 +1287,26 @@ fn pxar_file_download(
     }

     let mut split = components.splitn(2, |c| *c == '/' as u8);
-    let pxar_name = split.next().unwrap();
+    let pxar_name = std::str::from_utf8(split.next().unwrap())?;
     let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+    for file in files {
+        if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            bail!("cannot decode '{}' - is encrypted", pxar_name);
+        }
+    }

-    path.push(OsStr::from_bytes(&pxar_name));
+    let mut path = datastore.base_path();
+    path.push(backup_dir.relative_path());
+    path.push(pxar_name);

     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

-    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let (csum, size) = index.compute_csum();
+    manifest.verify_file(&pxar_name, &csum, size)?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
     let reader = BufferedDynamicReader::new(index, chunk_reader);
     let archive_size = reader.archive_size();
     let reader = LocalDynamicReadAt::new(reader);
@@ -1281,6 +1382,108 @@ fn get_rrd_stats(
     )
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get "notes" for a specific backup
+fn get_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    let notes = manifest["unprotected"]["notes"]
+        .as_str()
+        .unwrap_or("");
+
+    Ok(String::from(notes))
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            notes: {
+                description: "A multiline text.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+    },
+)]
+/// Set "notes" for a specific backup
+fn set_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    notes: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let mut manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    manifest["unprotected"]["notes"] = notes.into();
+
+    datastore.store_manifest(&backup_dir, manifest)?;
+
+    Ok(())
+}
+
 #[sortable]
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
     (
@@ -1314,6 +1517,12 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .get(&API_METHOD_LIST_GROUPS)
     ),
+    (
+        "notes",
+        &Router::new()
+            .get(&API_METHOD_GET_NOTES)
+            .put(&API_METHOD_SET_NOTES)
+    ),
     (
         "prune",
         &Router::new()
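The new notes endpoints store free-form text in the manifest's `unprotected` object, so editing a note does not invalidate the signed part of the manifest, and the snapshot list above surfaces just the first line as the comment. The read/update round trip is plain `serde_json` value juggling; a small self-contained sketch (requires the serde_json crate; the manifest shape here is simplified):

use serde_json::{json, Value};

fn get_notes(manifest: &Value) -> String {
    manifest["unprotected"]["notes"]
        .as_str()
        .unwrap_or("")
        .to_string()
}

fn set_notes(manifest: &mut Value, notes: &str) {
    manifest["unprotected"]["notes"] = notes.into();
}

fn main() {
    let mut manifest = json!({
        "files": [],
        "unprotected": {}
    });

    set_notes(&mut manifest, "nightly VM backup\nsecond line");
    // the snapshot list only shows the first line as the comment
    let comment = get_notes(&manifest).lines().next().map(String::from);
    assert_eq!(comment.as_deref(), Some("nightly VM backup"));
}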
@@ -1,15 +1,15 @@
-use anyhow::{Error};
+use anyhow::{format_err, Error};
 use serde_json::Value;
 use std::collections::HashMap;

 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
 use proxmox::api::router::SubdirMap;
 use proxmox::{list_subdirs_api_method, sortable};

 use crate::api2::types::*;
-use crate::api2::pull::{get_pull_parameters};
+use crate::api2::pull::do_sync_job;
 use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
-use crate::server::{self, TaskListInfo, WorkerTask};
+use crate::server::UPID;
+use crate::config::jobstate::{Job, JobState};
 use crate::tools::systemd::time::{
     parse_calendar_event, compute_next_event};

@@ -33,38 +33,32 @@ pub fn list_sync_jobs(

     let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;

-    let mut last_tasks: HashMap<String, &TaskListInfo> = HashMap::new();
-    let tasks = server::read_task_list()?;
-
-    for info in tasks.iter() {
-        let worker_id = match &info.upid.worker_id {
-            Some(id) => id,
-            _ => { continue; },
-        };
-        if let Some(last) = last_tasks.get(worker_id) {
-            if last.upid.starttime < info.upid.starttime {
-                last_tasks.insert(worker_id.to_string(), &info);
-            }
-        } else {
-            last_tasks.insert(worker_id.to_string(), &info);
-        }
-    }
-
     for job in &mut list {
-        let mut last = 0;
-        if let Some(task) = last_tasks.get(&job.id) {
-            job.last_run_upid = Some(task.upid_str.clone());
-            if let Some((endtime, status)) = &task.state {
-                job.last_run_state = Some(String::from(status));
-                job.last_run_endtime = Some(*endtime);
-                last = *endtime;
-            }
-        }
+        let last_state = JobState::load("syncjob", &job.id)
+            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
+        let (upid, endtime, state, starttime) = match last_state {
+            JobState::Created { time } => (None, None, None, time),
+            JobState::Started { upid } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), None, None, parsed_upid.starttime)
+            },
+            JobState::Finished { upid, state } => {
+                let parsed_upid: UPID = upid.parse()?;
+                (Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
+            },
+        };
+
+        job.last_run_upid = upid;
+        job.last_run_state = state;
+        job.last_run_endtime = endtime;
+
+        let last = job.last_run_endtime.unwrap_or_else(|| starttime);

         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
             let event = parse_calendar_event(&schedule).ok()?;
-            compute_next_event(&event, last, false).ok()
+            // ignore errors
+            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
         })();
     }

@@ -83,7 +77,7 @@ pub fn list_sync_jobs(
     }
 )]
 /// Runs the sync jobs manually.
-async fn run_sync_job(
+fn run_sync_job(
     id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -92,21 +86,11 @@ async fn run_sync_job(
     let (config, _digest) = sync::config()?;
     let sync_job: SyncJobConfig = config.lookup("sync", &id)?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

-    let delete = sync_job.remove_vanished.unwrap_or(true);
-    let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;
+    let job = Job::new("syncjob", &id)?;

-    let upid_str = WorkerTask::spawn("syncjob", Some(id.clone()), &username.clone(), false, move |worker| async move {
-
-        worker.log(format!("sync job '{}' start", &id));
-
-        crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, String::from("backup@pam")).await?;
-
-        worker.log(format!("sync job '{}' end", &id));
-
-        Ok(())
-    })?;
+    let upid_str = do_sync_job(job, sync_job, &userid, None)?;

     Ok(upid_str)
 }
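Deriving the last-run columns from a persisted per-job state record, instead of scanning the whole task list on every request, is a straightforward enum projection. A reduced sketch of the same match (with UPID parsing replaced by a plain struct and the state variants simplified relative to the diff):

struct Upid {
    starttime: i64,
}

enum JobState {
    Created { time: i64 },
    Started { upid: Upid },
    Finished { upid: Upid, endtime: i64, status: String },
}

/// Project the persisted state into (endtime, status, starttime) for the job list.
fn project(state: &JobState) -> (Option<i64>, Option<&str>, i64) {
    match state {
        JobState::Created { time } => (None, None, *time),
        JobState::Started { upid } => (None, None, upid.starttime),
        JobState::Finished { upid, endtime, status } => {
            (Some(*endtime), Some(status.as_str()), upid.starttime)
        }
    }
}

fn main() {
    let state = JobState::Finished {
        upid: Upid { starttime: 100 },
        endtime: 160,
        status: "OK".to_string(),
    };
    let (endtime, status, starttime) = project(&state);
    // schedule the next run relative to the last endtime, falling back to start
    let last = endtime.unwrap_or(starttime);
    assert_eq!((last, status), (160, Some("OK")));
}

One state file per job turns an O(all tasks) scan into an O(1) lookup per job.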
@@ -16,6 +16,7 @@ use crate::backup::*;
 use crate::api2::types::*;
 use crate::config::acl::PRIV_DATASTORE_BACKUP;
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::tools::fs::lock_dir_noblock;

 mod environment;
 use environment::*;
@@ -37,6 +38,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
             ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
+            ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
         ]),
     )
 ).access(
@@ -55,13 +57,14 @@ fn upgrade_to_backup_protocol(

 async move {
     let debug = param["debug"].as_bool().unwrap_or(false);
+    let benchmark = param["benchmark"].as_bool().unwrap_or(false);

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

     let store = tools::required_string_param(&param, "store")?.to_owned();

     let user_info = CachedUserInfo::new()?;
-    user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+    user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -88,35 +91,55 @@ async move {
     let env_type = rpcenv.env_type();

     let backup_group = BackupGroup::new(backup_type, backup_id);
-    let owner = datastore.create_backup_group(&backup_group, &username)?;
+
+    let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+        if !benchmark {
+            bail!("unable to run benchmark without --benchmark flags");
+        }
+        "benchmark"
+    } else {
+        if benchmark {
+            bail!("benchmark flags is only allowed on 'host/benchmark'");
+        }
+        "backup"
+    };
+
+    // lock backup group to only allow one backup per group at a time
+    let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;

     // permission check
-    if owner != username { // only the owner is allowed to create additional snapshots
-        bail!("backup owner check failed ({} != {})", username, owner);
+    if owner != userid && worker_type != "benchmark" {
+        // only the owner is allowed to create additional snapshots
+        bail!("backup owner check failed ({} != {})", userid, owner);
     }

-    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
-    let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
+    let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
+    let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time)?;

-    if let Some(last) = &last_backup {
+    let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {
             bail!("backup timestamp is older than last backup.");
         }
-        // fixme: abort if last backup is still running - howto test?
-        // Idea: write upid into a file inside snapshot dir. then test if
-        // it is still running here.
-    }

-    let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
+        // lock last snapshot to prevent forgetting/pruning it during backup
+        let full_path = datastore.snapshot_path(&last.backup_dir);
+        Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
+    } else {
+        None
+    };
+
+    let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
     if !is_new { bail!("backup directory already exists."); }

-    WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
-
+    WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
         let mut env = BackupEnvironment::new(
-            env_type, username.clone(), worker.clone(), datastore, backup_dir);
+            env_type, userid, worker.clone(), datastore, backup_dir);

         env.debug = debug;
         env.last_backup = last_backup;

-        env.log(format!("starting new backup on datastore '{}': {:?}", store, path));
+        env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));

         let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);

@@ -136,6 +159,7 @@ async move {
         let window_size = 32*1024*1024; // max = (1 << 31) - 2
         http.http2_initial_stream_window_size(window_size);
         http.http2_initial_connection_window_size(window_size);
+        http.http2_max_frame_size(4*1024*1024);

         http.serve_connection(conn, service)
             .map_err(Error::from)
@@ -144,11 +168,20 @@ async move {
             .map(|_| Err(format_err!("task aborted")));

         async move {
+            // keep flock until task ends
+            let _group_guard = _group_guard;
+            let _snap_guard = _snap_guard;
+            let _last_guard = _last_guard;
+
             let res = select!{
                 req = req_fut => req,
                 abrt = abort_future => abrt,
             };

+            if benchmark {
+                env.log("benchmark finished successfully");
+                env.remove_backup()?;
+                return Ok(());
+            }
             match (res, env.ensure_finished()) {
                 (Ok(_), Ok(())) => {
                     env.log("backup finished successfully");
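The `_group_guard`/`_snap_guard`/`_last_guard` rebindings at the top of the async block are what keep the directory flocks alive for the task's whole lifetime: a guard that is never moved into the future is dropped, and the lock released, as soon as the setup code returns. A minimal illustration of the idiom, with a Drop type standing in for the flock guard:

struct LockGuard(&'static str);

impl Drop for LockGuard {
    fn drop(&mut self) {
        println!("releasing lock: {}", self.0);
    }
}

fn run_task() {
    let group_guard = LockGuard("backup group");
    let snap_guard = LockGuard("new snapshot dir");

    let task = move || {
        // moving the guards into the closure ties their lifetime to the task;
        // without these bindings they would be dropped before the task runs
        let _group_guard = group_guard;
        let _snap_guard = snap_guard;
        println!("task body runs while both locks are held");
    };

    println!("locks still held here");
    task(); // guards dropped only when the task finishes
}

fn main() {
    run_task();
}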
@@ -1,18 +1,21 @@
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use std::sync::{Arc, Mutex};
 use std::collections::HashMap;

+use ::serde::{Serialize};
 use serde_json::{json, Value};

 use proxmox::tools::digest_to_hex;
 use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

-use crate::server::WorkerTask;
+use crate::api2::types::Userid;
 use crate::backup::*;
+use crate::server::WorkerTask;
 use crate::server::formatter::*;
 use hyper::{Body, Response};

 #[derive(Copy, Clone, Serialize)]
 struct UploadStatistic {
     count: u64,
     size: u64,
@@ -31,6 +34,19 @@ impl UploadStatistic {
     }
 }

+impl std::ops::Add for UploadStatistic {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self {
+        Self {
+            count: self.count + other.count,
+            size: self.size + other.size,
+            compressed_size: self.compressed_size + other.compressed_size,
+            duplicates: self.duplicates + other.duplicates,
+        }
+    }
+}
+
 struct DynamicWriterState {
     name: String,
     index: DynamicIndexWriter,
@@ -57,6 +73,8 @@ struct SharedBackupState {
     dynamic_writers: HashMap<usize, DynamicWriterState>,
     fixed_writers: HashMap<usize, FixedWriterState>,
     known_chunks: HashMap<[u8;32], u32>,
+    backup_size: u64, // sums up size of all files
+    backup_stat: UploadStatistic,
 }

 impl SharedBackupState {
@@ -82,7 +100,7 @@ impl SharedBackupState {
 pub struct BackupEnvironment {
     env_type: RpcEnvironmentType,
     result_attributes: Value,
-    user: String,
+    user: Userid,
     pub debug: bool,
     pub formatter: &'static OutputFormatter,
     pub worker: Arc<WorkerTask>,
@@ -95,7 +113,7 @@ pub struct BackupEnvironment {
 impl BackupEnvironment {
     pub fn new(
         env_type: RpcEnvironmentType,
-        user: String,
+        user: Userid,
         worker: Arc<WorkerTask>,
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
@@ -108,6 +126,8 @@ impl BackupEnvironment {
             dynamic_writers: HashMap::new(),
             fixed_writers: HashMap::new(),
             known_chunks: HashMap::new(),
+            backup_size: 0,
+            backup_stat: UploadStatistic::new(),
         };

         Self {
@@ -353,7 +373,6 @@ impl BackupEnvironment {

         let expected_csum = data.index.close()?;

-        println!("server checksum {:?} client: {:?}", expected_csum, csum);
         if csum != expected_csum {
             bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
         }
@@ -361,6 +380,8 @@ impl BackupEnvironment {
         self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);

         state.file_counter += 1;
+        state.backup_size += size;
+        state.backup_stat = state.backup_stat + data.upload_stat;

         Ok(())
     }
@@ -395,7 +416,6 @@ impl BackupEnvironment {
         let uuid = data.index.uuid;
         let expected_csum = data.index.close()?;

-        println!("server checksum: {:?} client: {:?} (incremental: {})", expected_csum, csum, data.incremental);
         if csum != expected_csum {
             bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
         }
@@ -403,6 +423,8 @@ impl BackupEnvironment {
         self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);

         state.file_counter += 1;
+        state.backup_size += size;
+        state.backup_stat = state.backup_stat + data.upload_stat;

         Ok(())
     }
@@ -416,9 +438,8 @@ impl BackupEnvironment {
         let blob_len = data.len();
         let orig_len = data.len(); // fixme:

-        let blob = DataBlob::from_raw(data)?;
-        // always verify CRC at server side
-        blob.verify_crc()?;
+        // always verify blob/CRC at server side
+        let blob = DataBlob::load_from_reader(&mut &data[..])?;

         let raw_data = blob.raw_data();
         replace_file(&path, raw_data, CreateOptions::new())?;
@@ -427,6 +448,8 @@ impl BackupEnvironment {

         let mut state = self.state.lock().unwrap();
         state.file_counter += 1;
+        state.backup_size += orig_len as u64;
+        state.backup_stat.size += blob_len as u64;

         Ok(())
     }
@@ -434,11 +457,11 @@ impl BackupEnvironment {
     /// Mark backup as finished
     pub fn finish_backup(&self) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
-        // test if all writer are correctly closed

         state.ensure_unfinished()?;

-        if state.dynamic_writers.len() != 0 {
+        // test if all writer are correctly closed
+        if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
             bail!("found open index writer - unable to finish backup");
         }

@@ -446,6 +469,28 @@ impl BackupEnvironment {
             bail!("backup does not contain valid files (file count == 0)");
         }

+        // check manifest
+        let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
+            .map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
+
+        let stats = serde_json::to_value(state.backup_stat)?;
+
+        manifest["unprotected"]["chunk_upload_stats"] = stats;
+
+        self.datastore.store_manifest(&self.backup_dir, manifest)
+            .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
+
+        if let Some(base) = &self.last_backup {
+            let path = self.datastore.snapshot_path(&base.backup_dir);
+            if !path.exists() {
+                bail!(
+                    "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
+                    base.backup_dir
+                );
+            }
+        }
+
+        // marks the backup as successful
         state.finished = true;

         Ok(())
@@ -480,7 +525,7 @@ impl BackupEnvironment {
         let mut state = self.state.lock().unwrap();
         state.finished = true;

-        self.datastore.remove_backup_dir(&self.backup_dir)?;
+        self.datastore.remove_backup_dir(&self.backup_dir, true)?;

         Ok(())
     }
@@ -505,7 +550,7 @@ impl RpcEnvironment for BackupEnvironment {
     }

     fn get_user(&self) -> Option<String> {
-        Some(self.user.clone())
+        Some(self.user.to_string())
     }
 }
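Implementing `std::ops::Add` for the statistics struct is what lets per-file upload stats be folded into the backup-wide total with a plain `+`, as in `state.backup_stat = state.backup_stat + data.upload_stat` above. A standalone version of the same idea (fields reduced to two for brevity):

#[derive(Debug, Clone, Copy, Default, PartialEq)]
struct UploadStatistic {
    count: u64,
    size: u64,
}

impl std::ops::Add for UploadStatistic {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            count: self.count + other.count,
            size: self.size + other.size,
        }
    }
}

fn main() {
    let per_file = [
        UploadStatistic { count: 10, size: 4096 },
        UploadStatistic { count: 3, size: 512 },
    ];
    // fold all per-writer statistics into one backup-wide total
    let total = per_file
        .iter()
        .copied()
        .fold(UploadStatistic::default(), |acc, s| acc + s);
    assert_eq!(total, UploadStatistic { count: 13, size: 4608 });
}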
@@ -243,7 +243,7 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
     &sorted!([
         ("file-name", false, &crate::api2::types::BACKUP_ARCHIVE_NAME_SCHEMA),
         ("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
-            .minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
+            .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
             .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
             .schema()
         )
@@ -5,6 +5,7 @@ use serde_json::Value;
 use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::backup::*;
@@ -99,7 +100,7 @@ pub fn list_datastores(
 /// Create new datastore config.
 pub fn create_datastore(param: Value) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;

@@ -253,7 +254,7 @@ pub fn update_datastore(
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     // pass/compare digest
     let (mut config, expected_digest) = datastore::config()?;
@@ -327,7 +328,7 @@ pub fn update_datastore(
 /// Remove a datastore configuration.
 pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = datastore::config()?;
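All of these config mutators follow the same shape: take a file lock, read config plus digest, compare digests, mutate, save. The guard only needs to be bound to `_lock` so it lives until the function returns. A schematic version of the pattern, with in-memory stand-ins for the lock file and the on-disk config (the digest handling is simplified to a counter):

use std::sync::Mutex;

static CONFIG_LOCK: Mutex<()> = Mutex::new(());
// hypothetical in-memory "config file" paired with a version digest
static CONFIG: Mutex<(String, u64)> = Mutex::new((String::new(), 0));

fn update_config(new_value: &str, expected_digest: Option<u64>) -> Result<(), String> {
    // held until the end of the function, like `_lock` in the API handlers
    let _lock = CONFIG_LOCK.lock().map_err(|_| "lock poisoned")?;

    let mut config = CONFIG.lock().map_err(|_| "lock poisoned")?;
    if let Some(digest) = expected_digest {
        if digest != config.1 {
            return Err("detected modified configuration - file changed by other user".into());
        }
    }
    config.0 = new_value.to_string();
    config.1 += 1; // new digest
    Ok(())
}

fn main() {
    update_config("datastore: backup", None).unwrap();
    // a stale digest is rejected, mirroring detect_modified_configuration_file
    assert!(update_config("datastore: other", Some(0)).is_err());
}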
@@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};
 use base64;

 use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::config::remote;
@@ -60,7 +61,7 @@ pub fn list_remotes(
             schema: DNS_NAME_OR_IP_SCHEMA,
         },
         userid: {
-            schema: PROXMOX_USER_ID_SCHEMA,
+            type: Userid,
         },
         password: {
             schema: remote::REMOTE_PASSWORD_SCHEMA,
@@ -78,7 +79,7 @@ pub fn list_remotes(
 /// Create new remote.
 pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let mut data = param.clone();
     data["password"] = Value::from(base64::encode(password.as_bytes()));
@@ -154,7 +155,7 @@ pub enum DeletableProperty {
         },
         userid: {
             optional: true,
-            schema: PROXMOX_USER_ID_SCHEMA,
+            type: Userid,
         },
         password: {
             optional: true,
@@ -187,14 +188,14 @@ pub fn update_remote(
     name: String,
     comment: Option<String>,
     host: Option<String>,
-    userid: Option<String>,
+    userid: Option<Userid>,
     password: Option<String>,
     fingerprint: Option<String>,
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = remote::config()?;

@@ -255,7 +256,7 @@ pub fn update_remote(
 /// Remove a remote from the configuration file.
 pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = remote::config()?;
@@ -3,6 +3,7 @@ use serde_json::Value;
 use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, Router, RpcEnvironment};
+use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::config::sync::{self, SyncJobConfig};
@@ -68,7 +69,7 @@ pub fn list_sync_jobs(
 /// Create a new sync job.
 pub fn create_sync_job(param: Value) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;

@@ -82,6 +83,8 @@ pub fn create_sync_job(param: Value) -> Result<(), Error> {

     sync::save_config(&config)?;

+    crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
+
     Ok(())
 }

@@ -184,7 +187,7 @@ pub fn update_sync_job(
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     // pass/compare digest
     let (mut config, expected_digest) = sync::config()?;
@@ -247,7 +250,7 @@ pub fn update_sync_job(
 /// Remove a sync job configuration
 pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {

-    let _lock = crate::tools::open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+    let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;

     let (mut config, expected_digest) = sync::config()?;

@@ -263,6 +266,8 @@ pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error>

     sync::save_config(&config)?;

+    crate::config::jobstate::remove_state_file("syncjob", &id)?;
+
     Ok(())
 }
@@ -1,13 +1,19 @@
 use std::path::PathBuf;

 use anyhow::Error;
-use futures::*;
+use futures::stream::TryStreamExt;
 use hyper::{Body, Response, StatusCode, header};
-use proxmox::http_err;

+use proxmox::http_bail;

 pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, Error> {
-    let file = tokio::fs::File::open(path.clone())
-        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path.clone(), err)))
-        .await?;
+    let file = match tokio::fs::File::open(path.clone()).await {
+        Ok(file) => file,
+        Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
+            http_bail!(NOT_FOUND, "open file {:?} failed - not found", path);
+        }
+        Err(err) => http_bail!(BAD_REQUEST, "open file {:?} failed: {}", path, err),
+    };

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
         .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
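Matching on `std::io::ErrorKind` is what lets the download helper translate a missing file into a 404 instead of a generic 400. The same pattern detached from hyper, with status codes as plain numbers for the sketch:

use std::io::ErrorKind;

fn open_for_download(path: &str) -> Result<std::fs::File, (u16, String)> {
    match std::fs::File::open(path) {
        Ok(file) => Ok(file),
        Err(ref err) if err.kind() == ErrorKind::NotFound => {
            // missing file is a client-addressable condition: 404
            Err((404, format!("open file {:?} failed - not found", path)))
        }
        // anything else (permissions, I/O) stays a generic bad request here
        Err(err) => Err((400, format!("open file {:?} failed: {}", path, err))),
    }
}

fn main() {
    match open_for_download("/does/not/exist") {
        Err((status, msg)) => println!("HTTP {}: {}", status, msg),
        Ok(_) => println!("file opened"),
    }
}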
312
src/api2/node.rs
@ -1,18 +1,308 @@
|
||||
use proxmox::api::router::{Router, SubdirMap};
|
||||
use proxmox::list_subdirs_api_method;
|
||||
use std::net::TcpListener;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
|
||||
pub mod tasks;
|
||||
mod time;
|
||||
pub mod network;
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::future::{FutureExt, TryFutureExt};
|
||||
use hyper::body::Body;
|
||||
use hyper::http::request::Parts;
|
||||
use hyper::upgrade::Upgraded;
|
||||
use nix::fcntl::{fcntl, FcntlArg, FdFlag};
|
||||
use serde_json::{json, Value};
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
|
||||
use proxmox::api::router::{Router, SubdirMap};
|
||||
use proxmox::api::{
|
||||
api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
|
||||
};
|
||||
use proxmox::list_subdirs_api_method;
|
||||
use proxmox::tools::websocket::WebSocket;
|
||||
use proxmox::{identity, sortable};
|
||||
|
||||
use crate::api2::types::*;
|
||||
use crate::config::acl::PRIV_SYS_CONSOLE;
|
||||
use crate::server::WorkerTask;
|
||||
use crate::tools;
|
||||
use crate::tools::ticket::{self, Empty, Ticket};
|
||||
|
||||
pub mod disks;
|
||||
pub mod dns;
|
||||
mod syslog;
|
||||
pub mod network;
|
||||
pub mod tasks;
|
||||
|
||||
pub(crate) mod rrd;
|
||||
|
||||
mod apt;
|
||||
mod journal;
|
||||
mod services;
|
||||
mod status;
|
||||
pub(crate) mod rrd;
|
||||
pub mod disks;
|
||||
mod subscription;
|
||||
mod syslog;
|
||||
mod time;
|
||||
|
||||
pub const SHELL_CMD_SCHEMA: Schema = StringSchema::new("The command to run.")
|
||||
.format(&ApiStringFormat::Enum(&[
|
||||
EnumEntry::new("login", "Login"),
|
||||
EnumEntry::new("upgrade", "Upgrade"),
|
||||
]))
|
||||
.schema();
|
||||
|
||||
#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            cmd: {
                schema: SHELL_CMD_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        type: Object,
        description: "Object with the user, ticket, port and upid",
        properties: {
            user: {
                description: "",
                type: String,
            },
            ticket: {
                description: "",
                type: String,
            },
            port: {
                description: "",
                type: String,
            },
            upid: {
                description: "",
                type: String,
            },
        }
    },
    access: {
        description: "Restricted to users on realm 'pam'",
        permission: &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
    }
)]
/// Call termproxy and return shell ticket
async fn termproxy(
    cmd: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let userid: Userid = rpcenv
        .get_user()
        .ok_or_else(|| format_err!("unknown user"))?
        .parse()?;

    if userid.realm() != "pam" {
        bail!("only pam users can use the console");
    }

    let path = "/system";

    // use port 0 and let the kernel decide which port is free
    let listener = TcpListener::bind("localhost:0")?;
    let port = listener.local_addr()?.port();

    let ticket = Ticket::new(ticket::TERM_PREFIX, &Empty)?
        .sign(
            crate::auth_helpers::private_auth_key(),
            Some(&ticket::term_aad(&userid, &path, port)),
        )?;

    let mut command = Vec::new();
    match cmd.as_ref().map(|x| x.as_str()) {
        Some("login") | None => {
            command.push("login");
            if userid == "root@pam" {
                command.push("-f");
                command.push("root");
            }
        }
        Some("upgrade") => {
            if userid != "root@pam" {
                bail!("only root@pam can upgrade");
            }
            // TODO: add nicer/safer wrapper like in PVE instead
            command.push("sh");
            command.push("-c");
            command.push("apt full-upgrade; bash -l");
        }
        _ => bail!("invalid command"),
    };

    let username = userid.name().to_owned();
    let upid = WorkerTask::spawn(
        "termproxy",
        None,
        userid,
        false,
        move |worker| async move {
            // move inside the worker so that it survives and does not close the port
            // remove CLOEXEC from listener so that we can reuse it in termproxy
            let fd = listener.as_raw_fd();
            let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
                Ok(bits) => FdFlag::from_bits_truncate(bits),
                Err(err) => bail!("could not get fd: {}", err),
            };
            flags.remove(FdFlag::FD_CLOEXEC);
            if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
                bail!("could not set fd: {}", err);
            }

            let mut arguments: Vec<&str> = Vec::new();
            let fd_string = fd.to_string();
            arguments.push(&fd_string);
            arguments.extend_from_slice(&[
                "--path",
                &path,
                "--perm",
                "Sys.Console",
                "--authport",
                "82",
                "--port-as-fd",
                "--",
            ]);
            arguments.extend_from_slice(&command);

            let mut cmd = tokio::process::Command::new("/usr/bin/termproxy");

            cmd.args(&arguments)
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped());

            let mut child = cmd.spawn().expect("error executing termproxy");

            let stdout = child.stdout.take().expect("no child stdout handle");
            let stderr = child.stderr.take().expect("no child stderr handle");

            let worker_stdout = worker.clone();
            let stdout_fut = async move {
                let mut reader = BufReader::new(stdout).lines();
                while let Some(line) = reader.next_line().await? {
                    worker_stdout.log(line);
                }
                Ok::<(), Error>(())
            };

            let worker_stderr = worker.clone();
            let stderr_fut = async move {
                let mut reader = BufReader::new(stderr).lines();
                while let Some(line) = reader.next_line().await? {
                    worker_stderr.warn(line);
                }
                Ok::<(), Error>(())
            };

            let mut needs_kill = false;
            let res = tokio::select!{
                res = &mut child => {
                    let exit_code = res?;
                    if !exit_code.success() {
                        match exit_code.code() {
                            Some(code) => bail!("termproxy exited with {}", code),
                            None => bail!("termproxy exited by signal"),
                        }
                    }
                    Ok(())
                },
                res = stdout_fut => res,
                res = stderr_fut => res,
                res = worker.abort_future() => {
                    needs_kill = true;
                    res.map_err(Error::from)
                }
            };

            if needs_kill {
                if res.is_ok() {
                    child.kill()?;
                    child.await?;
                    return Ok(());
                }

                if let Err(err) = child.kill() {
                    worker.warn(format!("error killing termproxy: {}", err));
                } else if let Err(err) = child.await {
                    worker.warn(format!("error awaiting termproxy: {}", err));
                }
            }

            res
        },
    )?;

    // FIXME: We're returning the user NAME only?
    Ok(json!({
        "user": username,
        "ticket": ticket,
        "port": port,
        "upid": upid,
    }))
}
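The fcntl sequence above, extracted into a stand-alone helper for clarity (the helper name is ours): clearing FD_CLOEXEC is what lets the spawned termproxy binary inherit the already-bound listener socket.

    use std::os::unix::io::RawFd;
    use nix::fcntl::{fcntl, FcntlArg, FdFlag};

    // Make an fd survive exec() so a child process can reuse it.
    fn clear_cloexec(fd: RawFd) -> Result<(), anyhow::Error> {
        let mut flags = FdFlag::from_bits_truncate(fcntl(fd, FcntlArg::F_GETFD)?);
        flags.remove(FdFlag::FD_CLOEXEC);
        fcntl(fd, FcntlArg::F_SETFD(flags))?;
        Ok(())
    }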
#[sortable]
pub const API_METHOD_WEBSOCKET: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upgrade_to_websocket),
    &ObjectSchema::new(
        "Upgraded to websocket",
        &sorted!([
            ("node", false, &NODE_SCHEMA),
            (
                "vncticket",
                false,
                &StringSchema::new("Terminal ticket").schema()
            ),
            ("port", false, &IntegerSchema::new("Terminal port").schema()),
        ]),
    ),
)
.access(
    Some("The user needs Sys.Console on /system."),
    &Permission::Privilege(&["system"], PRIV_SYS_CONSOLE, false),
);

fn upgrade_to_websocket(
    parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
    async move {
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let ticket = tools::required_string_param(&param, "vncticket")?;
        let port: u16 = tools::required_integer_param(&param, "port")? as u16;

        // will be checked again by termproxy
        Ticket::<Empty>::parse(ticket)?
            .verify(
                crate::auth_helpers::public_auth_key(),
                ticket::TERM_PREFIX,
                Some(&ticket::term_aad(&userid, "/system", port)),
            )?;

        let (ws, response) = WebSocket::new(parts.headers)?;

        crate::server::spawn_internal_task(async move {
            let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
                Ok(upgraded) => upgraded,
                _ => bail!("error"),
            };

            let local = tokio::net::TcpStream::connect(format!("localhost:{}", port)).await?;
            ws.serve_connection(conn, local).await
        });

        Ok(response)
    }
    .boxed()
}
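Taken together, termproxy() signs and upgrade_to_websocket() verifies the same ticket. An illustrative round-trip, composed only from calls that appear in the two functions above (the wrapper function itself is ours):

    fn term_ticket_roundtrip(userid: &Userid, port: u16) -> Result<(), Error> {
        let aad = ticket::term_aad(userid, "/system", port);
        let signed = Ticket::new(ticket::TERM_PREFIX, &Empty)?
            .sign(crate::auth_helpers::private_auth_key(), Some(&aad))?;
        // the websocket upgrade re-checks the same AAD against the public key
        Ticket::<Empty>::parse(&signed)?
            .verify(crate::auth_helpers::public_auth_key(), ticket::TERM_PREFIX, Some(&aad))?;
        Ok(())
    }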
pub const SUBDIRS: SubdirMap = &[
    ("apt", &apt::ROUTER),
    ("disks", &disks::ROUTER),
    ("dns", &dns::ROUTER),
    ("journal", &journal::ROUTER),
@@ -20,9 +310,15 @@ pub const SUBDIRS: SubdirMap = &[
    ("rrd", &rrd::ROUTER),
    ("services", &services::ROUTER),
    ("status", &status::ROUTER),
    ("subscription", &subscription::ROUTER),
    ("syslog", &syslog::ROUTER),
    ("tasks", &tasks::ROUTER),
    ("termproxy", &Router::new().post(&API_METHOD_TERMPROXY)),
    ("time", &time::ROUTER),
    (
        "vncwebsocket",
        &Router::new().upgrade(&API_METHOD_WEBSOCKET),
    ),
];

pub const ROUTER: Router = Router::new()
src/api2/node/apt.rs (new file)
@@ -0,0 +1,268 @@
use apt_pkg_native::Cache;
use anyhow::{Error, bail};
use serde_json::{json, Value};

use proxmox::{list_subdirs_api_method, const_regex};
use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::{Router, SubdirMap};

use crate::server::WorkerTask;

use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};

const_regex! {
    VERSION_EPOCH_REGEX = r"^\d+:";
    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
}

// FIXME: Replace with call to 'apt changelog <pkg> --print-uris'. Currently
// not possible as our packages do not have a URI set in their Release file
fn get_changelog_url(
    package: &str,
    filename: &str,
    source_pkg: &str,
    version: &str,
    source_version: &str,
    origin: &str,
    component: &str,
) -> Result<String, Error> {
    if origin == "" {
        bail!("no origin available for package {}", package);
    }

    if origin == "Debian" {
        let source_version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(source_version, "");

        let prefix = if source_pkg.starts_with("lib") {
            source_pkg.get(0..4)
        } else {
            source_pkg.get(0..1)
        };

        let prefix = match prefix {
            Some(p) => p,
            None => bail!("cannot get starting characters of package name '{}'", package)
        };

        // note: security updates seem to not always upload a changelog for
        // their package version, so this only works *most* of the time
        return Ok(format!("https://metadata.ftp-master.debian.org/changelogs/main/{}/{}/{}_{}_changelog",
                          prefix, source_pkg, source_pkg, source_version));

    } else if origin == "Proxmox" {
        let version = (VERSION_EPOCH_REGEX.regex_obj)().replace_all(version, "");

        let base = match (FILENAME_EXTRACT_REGEX.regex_obj)().captures(filename) {
            Some(captures) => {
                let base_capture = captures.get(1);
                match base_capture {
                    Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
                    None => bail!("incompatible filename, cannot find regex group")
                }
            },
            None => bail!("incompatible filename, doesn't match regex")
        };

        return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
                          base, package, version));
    }

    bail!("unknown origin ({}) or component ({})", origin, component)
}
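An illustrative call with made-up package metadata; for a Debian-origin package the filename argument is never consulted, so a placeholder suffices.

    fn changelog_url_example() -> Result<(), Error> {
        let url = get_changelog_url(
            "zsh",       // package (hypothetical)
            "Packages",  // filename, only parsed for origin "Proxmox"
            "zsh",       // source package
            "5.7.1-1",   // candidate version
            "5.7.1-1",   // source version
            "Debian",
            "main",
        )?;
        // prefix "z" is derived from the source package name above
        assert_eq!(url,
            "https://metadata.ftp-master.debian.org/changelogs/main/z/zsh/zsh_5.7.1-1_changelog");
        Ok(())
    }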
fn list_installed_apt_packages<F: Fn(&str, &str, &str) -> bool>(filter: F)
    -> Vec<APTUpdateInfo> {

    let mut ret = Vec::new();

    // note: this is not an 'apt update', it just re-reads the cache from disk
    let mut cache = Cache::get_singleton();
    cache.reload();

    let mut cache_iter = cache.iter();

    loop {
        let view = match cache_iter.next() {
            Some(view) => view,
            None => break
        };

        let current_version = match view.current_version() {
            Some(vers) => vers,
            None => continue
        };
        let candidate_version = match view.candidate_version() {
            Some(vers) => vers,
            // if there's no candidate (i.e. no update) get info of currently
            // installed version instead
            None => current_version.clone()
        };

        let package = view.name();
        if filter(&package, &current_version, &candidate_version) {
            let mut origin_res = "unknown".to_owned();
            let mut section_res = "unknown".to_owned();
            let mut priority_res = "unknown".to_owned();
            let mut change_log_url = "".to_owned();
            let mut short_desc = package.clone();
            let mut long_desc = "".to_owned();

            // get additional information via nested APT 'iterators'
            let mut view_iter = view.versions();
            while let Some(ver) = view_iter.next() {
                if ver.version() == candidate_version {
                    if let Some(section) = ver.section() {
                        section_res = section;
                    }

                    if let Some(prio) = ver.priority_type() {
                        priority_res = prio;
                    }

                    // assume every package has only one origin file (not
                    // origin, but origin *file*, for some reason those seem to
                    // be different concepts in APT)
                    let mut origin_iter = ver.origin_iter();
                    let origin = origin_iter.next();
                    if let Some(origin) = origin {

                        if let Some(sd) = origin.short_desc() {
                            short_desc = sd;
                        }

                        if let Some(ld) = origin.long_desc() {
                            long_desc = ld;
                        }

                        // the package files appear in priority order, meaning
                        // the one for the candidate version is first
                        let mut pkg_iter = origin.file();
                        let pkg_file = pkg_iter.next();
                        if let Some(pkg_file) = pkg_file {
                            if let Some(origin_name) = pkg_file.origin() {
                                origin_res = origin_name;
                            }

                            let filename = pkg_file.file_name();
                            let source_pkg = ver.source_package();
                            let source_ver = ver.source_version();
                            let component = pkg_file.component();

                            // build changelog URL from gathered information
                            // ignore errors, use empty changelog instead
                            let url = get_changelog_url(&package, &filename, &source_pkg,
                                &candidate_version, &source_ver, &origin_res, &component);
                            if let Ok(url) = url {
                                change_log_url = url;
                            }
                        }
                    }

                    break;
                }
            }

            let info = APTUpdateInfo {
                package,
                title: short_desc,
                arch: view.arch(),
                description: long_desc,
                change_log_url,
                origin: origin_res,
                version: candidate_version,
                old_version: current_version,
                priority: priority_res,
                section: section_res,
            };
            ret.push(info);
        }
    }

    return ret;
}
#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "A list of packages with available updates.",
        type: Array,
        items: { type: APTUpdateInfo },
    },
    access: {
        permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false),
    },
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
    let ret = list_installed_apt_packages(|_pkg, cur_ver, can_ver| cur_ver != can_ver);
    Ok(json!(ret))
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            quiet: {
                description: "Only produces output suitable for logging, omitting progress indicators.",
                type: bool,
                default: false,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&[], PRIV_SYS_MODIFY, false),
    },
)]
/// Update the APT database
pub fn apt_update_database(
    quiet: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);

    let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
        if !quiet { worker.log("starting apt-get update") }

        // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE

        let mut command = std::process::Command::new("apt-get");
        command.arg("update");

        let output = crate::tools::run_command(command, None)?;
        if !quiet { worker.log(output) }

        // TODO: add mail notify for new updates like PVE

        Ok(())
    })?;

    Ok(upid_str)
}

const SUBDIRS: SubdirMap = &[
    ("update", &Router::new()
        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
        .post(&API_METHOD_APT_UPDATE_DATABASE)
    ),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
@@ -13,7 +13,7 @@ use crate::tools::disks::{
};
use crate::server::WorkerTask;

use crate::api2::types::{UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};

pub mod directory;
pub mod zfs;
@@ -140,7 +140,7 @@ pub fn initialize_disk(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    let info = get_disk_usage_info(&disk, true)?;

@@ -149,7 +149,7 @@ pub fn initialize_disk(
    }

    let upid_str = WorkerTask::new_thread(
        "diskinit", Some(disk.clone()), &username.clone(), to_stdout, move |worker|
        "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
    {
        worker.log(format!("initialize disk {}", disk));
@@ -16,6 +16,7 @@ use crate::tools::systemd::{self, types::*};
use crate::server::WorkerTask;

use crate::api2::types::*;
use crate::config::datastore::DataStoreConfig;

#[api(
    properties: {
@@ -133,7 +134,7 @@ pub fn create_datastore_disk(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    let info = get_disk_usage_info(&disk, true)?;

@@ -142,7 +143,7 @@ pub fn create_datastore_disk(
    }

    let upid_str = WorkerTask::new_thread(
        "dircreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
    {
        worker.log(format!("create datastore '{}' on disk {}", name, disk));

@@ -175,9 +176,69 @@ pub fn create_datastore_disk(
    Ok(upid_str)
}

#[api(
    protected: true,
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
            name: {
                schema: DATASTORE_SCHEMA,
            },
        }
    },
    access: {
        permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
    },
)]
/// Remove a filesystem mounted under '/mnt/datastore/<name>'.
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {

    let path = format!("/mnt/datastore/{}", name);
    // path of datastore cannot be changed
    let (config, _) = crate::config::datastore::config()?;
    let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
    let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
        .filter(|ds| ds.path == path)
        .next();

    if let Some(conflicting_datastore) = conflicting_datastore {
        bail!("Can't remove '{}' since it's required by datastore '{}'",
              conflicting_datastore.path, conflicting_datastore.name);
    }

    // disable systemd mount-unit
    let mut mount_unit_name = systemd::escape_unit(&path, true);
    mount_unit_name.push_str(".mount");
    systemd::disable_unit(&mount_unit_name)?;

    // delete .mount-file
    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
    let full_path = std::path::Path::new(&mount_unit_path);
    log::info!("removing systemd mount unit {:?}", full_path);
    std::fs::remove_file(&full_path)?;

    // try to unmount, if that fails tell the user to reboot or unmount manually
    let mut command = std::process::Command::new("umount");
    command.arg(&path);
    match crate::tools::run_command(command, None) {
        Err(_) => bail!(
            "Could not umount '{}' since it is busy. It will stay mounted \
             until the next reboot or until unmounted manually!",
            path
        ),
        Ok(_) => Ok(())
    }
}

const ITEM_ROUTER: Router = Router::new()
    .delete(&API_METHOD_DELETE_DATASTORE_DISK);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
    .post(&API_METHOD_CREATE_DATASTORE_DISK);
    .post(&API_METHOD_CREATE_DATASTORE_DISK)
    .match_all("name", &ITEM_ROUTER);
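For orientation, what the escape/disable sequence above produces for a concrete store name (unit name follows systemd's path-escaping rules; the store name and helper are ours):

    fn mount_unit_for(store: &str) -> String {
        // mirrors the logic above; "/mnt/datastore/store1" escapes to
        // "mnt-datastore-store1", so the unit file ends up at
        // /etc/systemd/system/mnt-datastore-store1.mount
        let mut unit = systemd::escape_unit(&format!("/mnt/datastore/{}", store), true);
        unit.push_str(".mount");
        unit
    }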
fn create_datastore_mount_unit(
@@ -41,6 +41,9 @@ pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
    .default(12)
    .schema();

pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
    .schema();

#[api(
    default: "On",
@@ -157,7 +160,7 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
            schema: NODE_SCHEMA,
        },
        name: {
            schema: DATASTORE_SCHEMA,
            schema: ZPOOL_NAME_SCHEMA,
        },
    },
},
@@ -251,7 +254,7 @@ pub fn create_zpool(

    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    let add_datastore = add_datastore.unwrap_or(false);

@@ -311,7 +314,7 @@ pub fn create_zpool(
    }

    let upid_str = WorkerTask::new_thread(
        "zfscreate", Some(name.clone()), &username.clone(), to_stdout, move |worker|
        "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
    {
        worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
@@ -4,6 +4,7 @@ use ::serde::{Deserialize, Serialize};

use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox::api::schema::parse_property_string;
use proxmox::tools::fs::open_file_locked;

use crate::config::network::{self, NetworkConfig};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
@@ -230,7 +231,7 @@ pub fn create_interface(
    let interface_type = crate::tools::required_string_param(&param, "type")?;
    let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, _digest) = network::config()?;

@@ -463,7 +464,7 @@ pub fn update_interface(
    param: Value,
) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

@@ -586,7 +587,7 @@ pub fn update_interface(
/// Remove network interface configuration.
pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Error> {

    let _lock = crate::tools::open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;
    let _lock = open_file_locked(network::NETWORK_LOCKFILE, std::time::Duration::new(10, 0))?;

    let (mut config, expected_digest) = network::config()?;

@@ -624,9 +625,9 @@ pub async fn reload_network_config(

    network::assert_ifupdown2_installed()?;

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), &username.clone(), true, |_worker| async {
    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {

        let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
@@ -4,12 +4,13 @@ use anyhow::{bail, Error};
use serde_json::{json, Value};

use proxmox::{sortable, identity, list_subdirs_api_method};
use proxmox::api::{api, Router, Permission};
use proxmox::api::{api, Router, Permission, RpcEnvironment};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;

use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::server::WorkerTask;

static SERVICE_NAME_LIST: [&str; 7] = [
    "proxmox-backup",
@@ -181,30 +182,43 @@ fn get_service_state(
    Ok(json_service_state(&service, status))
}

fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {

    // fixme: run background worker (fork_worker) ???
    let workerid = format!("srv{}", &cmd);

    match cmd {
        "start"|"stop"|"restart"|"reload" => {},
    let cmd = match cmd {
        "start"|"stop"|"restart"=> cmd.to_string(),
        "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
        _ => bail!("unknown service command '{}'", cmd),
    }
    };
    let service = service.to_string();

    if service == "proxmox-backup" && cmd != "restart" {
        bail!("invalid service cmd '{} {}'", service, cmd);
    }
    let upid = WorkerTask::new_thread(
        &workerid,
        Some(service.clone()),
        userid,
        false,
        move |_worker| {

    let real_service_name = real_service_name(service);
            if service == "proxmox-backup" && cmd == "stop" {
                bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
            }

    let status = Command::new("systemctl")
        .args(&[cmd, real_service_name])
        .status()?;
            let real_service_name = real_service_name(&service);

    if !status.success() {
        bail!("systemctl {} failed with {}", cmd, status);
    }
            let status = Command::new("systemctl")
                .args(&[&cmd, real_service_name])
                .status()?;

    Ok(Value::Null)
            if !status.success() {
                bail!("systemctl {} failed with {}", cmd, status);
            }

            Ok(())
        }
    )?;

    Ok(upid.into())
}

#[api(
@@ -227,11 +241,14 @@ fn run_service_command(service: &str, cmd: &str) -> Result<Value, Error> {
fn start_service(
    service: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    log::info!("starting service {}", service);

    run_service_command(&service, "start")
    run_service_command(&service, "start", userid)
}

#[api(
@@ -254,11 +271,14 @@ fn start_service(
fn stop_service(
    service: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    log::info!("stopping service {}", service);

    run_service_command(&service, "stop")
    run_service_command(&service, "stop", userid)
}

#[api(
@@ -281,15 +301,18 @@ fn stop_service(
fn restart_service(
    service: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    log::info!("re-starting service {}", service);

    if &service == "proxmox-backup-proxy" {
        // special case, avoid aborting running tasks
        run_service_command(&service, "reload")
        run_service_command(&service, "reload", userid)
    } else {
        run_service_command(&service, "restart")
        run_service_command(&service, "restart", userid)
    }
}

@@ -313,11 +336,14 @@ fn restart_service(
fn reload_service(
    service: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    log::info!("reloading service {}", service);

    run_service_command(&service, "reload")
    run_service_command(&service, "reload", userid)
}
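The verb mapping above ("reload" becomes "try-reload-or-restart") can be read in isolation; a minimal sketch with a helper name of our own choosing:

    fn map_service_cmd(cmd: &str) -> Result<String, anyhow::Error> {
        Ok(match cmd {
            "start" | "stop" | "restart" => cmd.to_string(),
            // systemd's try-reload-or-restart copes with units lacking a reload action
            "reload" => "try-reload-or-restart".to_string(),
            _ => anyhow::bail!("unknown service command '{}'", cmd),
        })
    }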
@@ -10,6 +10,7 @@ use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};

use crate::api2::types::*;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
use crate::tools::cert::CertInfo;

#[api(
    input: {
@@ -46,14 +47,24 @@ use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
                description: "Total CPU usage since last query.",
                optional: true,
            },
        }
        info: {
            type: Object,
            description: "contains node information",
            properties: {
                fingerprint: {
                    description: "The SSL Fingerprint",
                    type: String,
                },
            },
        },
    },
},
access: {
    permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
},
)]
/// Read node memory, CPU and (root) disk usage
fn get_usage(
fn get_status(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
@@ -63,6 +74,10 @@ fn get_usage(
    let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;

    // get fingerprint
    let cert = CertInfo::new()?;
    let fp = cert.fingerprint()?;

    Ok(json!({
        "memory": {
            "total": meminfo.memtotal,
@@ -74,7 +89,10 @@ fn get_usage(
            "total": disk_usage.total,
            "used": disk_usage.used,
            "free": disk_usage.avail,
        }
        },
        "info": {
            "fingerprint": fp,
        },
    }))
}

@@ -122,5 +140,5 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
}

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_USAGE)
    .get(&API_METHOD_GET_STATUS)
    .post(&API_METHOD_REBOOT_OR_SHUTDOWN);
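For reference, a plausible shape of the extended get_status() response. The numbers are invented and the disk key is abridged in the hunk above, so treat this purely as a sketch:

    fn status_shape_example() -> serde_json::Value {
        serde_json::json!({
            "memory": { "total": 33554432000u64, "used": 8100000000u64, "free": 25454432000u64 },
            "root":   { "total": 41000000000u64, "used": 9000000000u64, "free": 32000000000u64 },
            // colon-separated SHA-256 certificate fingerprint from CertInfo
            "info":   { "fingerprint": "ab:cd:ef:..." },
        })
    }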
@@ -5,8 +5,16 @@ use proxmox::api::{api, Router, Permission};

use crate::tools;
use crate::config::acl::PRIV_SYS_AUDIT;
use crate::api2::types::NODE_SCHEMA;

#[api(
    input: {
        properties: {
            node: {
                schema: NODE_SCHEMA,
            },
        },
    },
    returns: {
        description: "Subscription status.",
        properties: {
@@ -4,13 +4,13 @@ use std::io::{BufRead, BufReader};
use anyhow::{Error};
use serde_json::{json, Value};

use proxmox::api::{api, Router, RpcEnvironment, Permission, UserInformation};
use proxmox::api::{api, Router, RpcEnvironment, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::{identity, list_subdirs_api_method, sortable};

use crate::tools;
use crate::api2::types::*;
use crate::server::{self, UPID};
use crate::server::{self, UPID, TaskState};
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;

@@ -84,11 +84,11 @@ async fn get_task_status(

    let upid = extract_upid(&param)?;

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    if username != upid.username {
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }

    let mut result = json!({
@@ -99,15 +99,15 @@ async fn get_task_status(
        "starttime": upid.starttime,
        "type": upid.worker_type,
        "id": upid.worker_id,
        "user": upid.username,
        "user": upid.userid,
    });

    if crate::server::worker_is_active(&upid).await? {
        result["status"] = Value::from("running");
    } else {
        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(String::from("unknown"));
        let exitstatus = crate::server::upid_read_status(&upid).unwrap_or(TaskState::Unknown { endtime: 0 });
        result["status"] = Value::from("stopped");
        result["exitstatus"] = Value::from(exitstatus);
        result["exitstatus"] = Value::from(exitstatus.to_string());
    };

    Ok(result)
@@ -161,11 +161,11 @@ async fn read_task_log(

    let upid = extract_upid(&param)?;

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    if username != upid.username {
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
    }

    let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(

    let upid = extract_upid(&param)?;

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;

    if username != upid.username {
    if userid != upid.userid {
        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
        user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
    }

    server::abort_worker_async(upid);
@@ -281,7 +281,7 @@ fn stop_task(
            default: false,
        },
        userfilter: {
            optional:true,
            optional: true,
            type: String,
            description: "Only list tasks from this user.",
        },
@@ -307,9 +307,9 @@ pub fn list_tasks(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -324,11 +324,11 @@ pub fn list_tasks(
    let mut count = 0;

    for info in list {
        if !list_all && info.upid.username != username { continue; }
        if !list_all && info.upid.userid != userid { continue; }

        if let Some(username) = userfilter {
            if !info.upid.username.contains(username) { continue; }
        if let Some(userid) = userfilter {
            if !info.upid.userid.as_str().contains(userid) { continue; }
        }

        if let Some(store) = store {
@@ -352,8 +352,9 @@ pub fn list_tasks(

        if let Some(ref state) = info.state {
            if running { continue; }
            if errors && state.1 == "OK" {
                continue;
            match state {
                crate::server::TaskState::OK { .. } if errors => continue,
                _ => {},
            }
        }
@@ -2,6 +2,7 @@
use std::sync::{Arc};

use anyhow::{format_err, Error};
use futures::{select, future::FutureExt};

use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
@@ -12,13 +13,15 @@ use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_
use crate::api2::types::*;
use crate::config::{
    remote,
    sync::SyncJobConfig,
    jobstate::Job,
    acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
    cached_user_info::CachedUserInfo,
};


pub fn check_pull_privs(
    username: &str,
    userid: &Userid,
    store: &str,
    remote: &str,
    remote_store: &str,
@@ -27,11 +30,11 @@ pub fn check_pull_privs(

    let user_info = CachedUserInfo::new()?;

    user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(username, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
    user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
    user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;

    if delete {
        user_info.check_privs(username, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
        user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
    }

    Ok(())
@@ -62,6 +65,68 @@ pub async fn get_pull_parameters(
    Ok((client, src_repo, tgt_store))
}

pub fn do_sync_job(
    mut job: Job,
    sync_job: SyncJobConfig,
    userid: &Userid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let job_id = job.jobname().to_string();
    let worker_type = job.jobtype().to_string();

    let upid_str = WorkerTask::spawn(
        &worker_type,
        Some(job.jobname().to_string()),
        userid.clone(),
        false,
        move |worker| async move {

            job.start(&worker.upid().to_string())?;

            let worker2 = worker.clone();

            let worker_future = async move {

                let delete = sync_job.remove_vanished.unwrap_or(true);
                let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

                worker.log(format!("Starting datastore sync job '{}'", job_id));
                if let Some(event_str) = schedule {
                    worker.log(format!("task triggered by schedule '{}'", event_str));
                }
                worker.log(format!("Sync datastore '{}' from '{}/{}'",
                        sync_job.store, sync_job.remote, sync_job.remote_store));

                crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;

                worker.log(format!("sync job '{}' end", &job_id));

                Ok(())
            };

            let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));

            let res = select!{
                worker = worker_future.fuse() => worker,
                abort = abort_future => abort,
            };

            let status = worker2.create_state(&res);

            match job.finish(status) {
                Ok(_) => {},
                Err(err) => {
                    eprintln!("could not finish job state: {}", err);
                }
            }

            res
        })?;

    Ok(upid_str)
}
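A hypothetical call site for do_sync_job, e.g. from a job scheduler. Job::new and the schedule string are assumed here; neither appears in this diff.

    fn run_scheduled_sync(sync_job: SyncJobConfig, id: &str) -> Result<String, Error> {
        let job = Job::new("syncjob", id)?; // hypothetical constructor
        // backup_userid() is the builtin user the sync worker runs as (used above)
        do_sync_job(job, sync_job, Userid::backup_userid(), Some("daily".to_string()))
    }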
#[api(
    input: {
        properties: {
@@ -99,19 +164,19 @@ async fn pull (
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let delete = remove_vanished.unwrap_or(true);

    check_pull_privs(&username, &store, &remote, &remote_store, delete)?;
    check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;

    let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;

    // fixme: set to_stdout to false?
    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), &username.clone(), true, move |worker| async move {
    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {

        worker.log(format!("sync datastore '{}' start", store));

        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, username).await?;
        pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid).await?;

        worker.log(format!("sync datastore '{}' end", store));
@@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
    async move {
        let debug = param["debug"].as_bool().unwrap_or(false);

        let username = rpcenv.get_user().unwrap();
        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
        let store = tools::required_string_param(&param, "store")?.to_owned();

        let user_info = CachedUserInfo::new()?;
        user_info.check_privs(&username, &["datastore", &store], PRIV_DATASTORE_READ, false)?;
        user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_READ, false)?;

        let datastore = DataStore::lookup_datastore(&store)?;

@@ -83,16 +83,21 @@ fn upgrade_to_backup_reader_protocol(

        let env_type = rpcenv.env_type();

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
        let path = datastore.base_path();

        //let files = BackupInfo::list_files(&path, &backup_dir)?;

        let worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_dir.backup_time().timestamp());

        WorkerTask::spawn("reader", Some(worker_id), &username.clone(), true, move |worker| {
        WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
            let mut env = ReaderEnvironment::new(
                env_type, username.clone(), worker.clone(), datastore, backup_dir);
                env_type,
                userid,
                worker.clone(),
                datastore,
                backup_dir,
            );

            env.debug = debug;

@@ -116,6 +121,7 @@ fn upgrade_to_backup_reader_protocol(
            let window_size = 32*1024*1024; // max = (1 << 31) - 2
            http.http2_initial_stream_window_size(window_size);
            http.http2_initial_connection_window_size(window_size);
            http.http2_max_frame_size(4*1024*1024);

            http.serve_connection(conn, service)
                .map_err(Error::from)
@@ -225,8 +231,8 @@ fn download_chunk(
    env.debug(format!("download chunk {:?}", path));

    let data = tokio::fs::read(path)
        .map_err(move |err| http_err!(BAD_REQUEST, format!("reading file {:?} failed: {}", path2, err)))
        .await?;
        .await
        .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;

    let body = Body::from(data);

@@ -260,7 +266,7 @@ fn download_chunk_old(
    let path3 = path.clone();

    let response_future = tokio::fs::File::open(path)
        .map_err(move |err| http_err!(BAD_REQUEST, format!("open file {:?} failed: {}", path2, err)))
        .map_err(move |err| http_err!(BAD_REQUEST, "open file {:?} failed: {}", path2, err))
        .and_then(move |file| {
            env2.debug(format!("download chunk {:?}", path3));
            let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
@@ -5,9 +5,10 @@ use serde_json::{json, Value};

use proxmox::api::{RpcEnvironment, RpcEnvironmentType};

use crate::server::WorkerTask;
use crate::api2::types::Userid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;

//use proxmox::tools;

@@ -16,7 +17,7 @@ use crate::server::formatter::*;
pub struct ReaderEnvironment {
    env_type: RpcEnvironmentType,
    result_attributes: Value,
    user: String,
    user: Userid,
    pub debug: bool,
    pub formatter: &'static OutputFormatter,
    pub worker: Arc<WorkerTask>,
@@ -28,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
    pub fn new(
        env_type: RpcEnvironmentType,
        user: String,
        user: Userid,
        worker: Arc<WorkerTask>,
        datastore: Arc<DataStore>,
        backup_dir: BackupDir,
@@ -77,7 +78,7 @@ impl RpcEnvironment for ReaderEnvironment {
    }

    fn get_user(&self) -> Option<String> {
        Some(self.user.clone())
        Some(self.user.to_string())
    }
}
@@ -10,14 +10,14 @@ use proxmox::api::{
    Router,
    RpcEnvironment,
    SubdirMap,
    UserInformation,
};

use crate::api2::types::{
    DATASTORE_SCHEMA,
    RRDMode,
    RRDTimeFrameResolution,
    TaskListItem
    TaskListItem,
    Userid,
};

use crate::server;
@@ -74,6 +74,9 @@ use crate::config::acl::{
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// List Datastore usages and estimates
fn datastore_status(
@@ -84,13 +87,13 @@ fn datastore_status(

    let (config, _digest) = datastore::config()?;

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, _)) in &config.sections {
        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
        if !allowed {
            continue;
@@ -161,6 +164,8 @@ fn datastore_status(
        if b != 0.0 {
            let estimate = (1.0 - a) / b;
            entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
        } else {
            entry["estimated-full-date"] = Value::from(0);
        }
    }
}
@@ -200,9 +205,9 @@ pub fn list_tasks(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {

    let username = rpcenv.get_user().unwrap();
    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&username, &["system", "tasks"]);
    let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);

    let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;

@@ -210,7 +215,7 @@ pub fn list_tasks(
    let list: Vec<TaskListItem> = server::read_task_list()?
        .into_iter()
        .map(TaskListItem::from)
        .filter(|entry| list_all || entry.user == username)
        .filter(|entry| list_all || entry.user == userid)
        .collect();

    Ok(list.into())
src/api2/types/macros.rs (new file)
@@ -0,0 +1,4 @@
//! Macros exported from api2::types.

#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }
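Sketch of the macro in use: elsewhere in this changeset it is composed into anchored regexes via proxmox's const_regex! (the same composition style as ACL_PATH_REGEX below); the constant name here is ours.

    use proxmox::const_regex;

    const_regex! {
        pub EXAMPLE_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
    }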
@@ -1,10 +1,23 @@
use anyhow::{bail};
use ::serde::{Deserialize, Serialize};
use anyhow::bail;
use serde::{Deserialize, Serialize};

use proxmox::api::{api, schema::*};
use proxmox::const_regex;
use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

use crate::backup::CryptMode;
use crate::server::UPID;

#[macro_use]
mod macros;

#[macro_use]
mod userid;
pub use userid::{Realm, RealmRef};
pub use userid::{Username, UsernameRef};
pub use userid::Userid;
pub use userid::PROXMOX_GROUP_ID_SCHEMA;

// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
    if name.starts_with('.') {
@@ -19,19 +32,6 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }

// we only allow a limited set of characters
// colon is not allowed, because we store usernames in
// colon separated lists)!
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }

macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }

#[macro_export]
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => (r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)") }

macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

@@ -65,17 +65,15 @@ const_regex!{

    pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");

    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");

    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");

    pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

    pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

    pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";

    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -111,12 +109,6 @@ pub const DNS_NAME_FORMAT: ApiStringFormat =
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);

pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);

pub const PASSWORD_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PASSWORD_REGEX);

@@ -339,24 +331,6 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
    .format(&DNS_NAME_OR_IP_FORMAT)
    .schema();

pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = StringSchema::new("Authentication domain ID")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const PROXMOX_USER_ID_SCHEMA: Schema = StringSchema::new("User ID")
    .format(&PROXMOX_USER_ID_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();

pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
    .format(&PROXMOX_GROUP_ID_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();

pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
    .format(&BLOCKDEVICE_NAME_FORMAT)
    .min_length(3)
@@ -384,6 +358,10 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
            schema: BACKUP_ARCHIVE_NAME_SCHEMA
        },
    },
    owner: {
        type: Userid,
        optional: true,
    },
},
)]
#[derive(Serialize, Deserialize)]
@@ -399,7 +377,26 @@ pub struct GroupListItem {
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<String>,
    pub owner: Option<Userid>,
}

#[api(
    properties: {
        upid: {
            schema: UPID_SCHEMA
        },
        state: {
            type: String
        },
    },
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification. "failed" or "ok"
    pub state: String,
}

#[api(
@@ -413,11 +410,23 @@ pub struct GroupListItem {
    "backup-time": {
        schema: BACKUP_TIME_SCHEMA,
    },
    comment: {
        schema: SINGLE_LINE_COMMENT_SCHEMA,
        optional: true,
    },
    verification: {
        type: SnapshotVerifyState,
        optional: true,
    },
    files: {
        items: {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA
        },
    },
    owner: {
        type: Userid,
        optional: true,
    },
},
)]
#[derive(Serialize, Deserialize)]
@@ -427,6 +436,12 @@ pub struct SnapshotListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    pub backup_time: i64,
    /// The first line from manifest "notes"
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if="Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
@@ -434,7 +449,7 @@ pub struct SnapshotListItem {
    pub size: Option<u64>,
    /// The owner of the snapshots group
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<String>,
    pub owner: Option<Userid>,
}

#[api(
@@ -496,6 +511,10 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
    "filename": {
        schema: BACKUP_ARCHIVE_NAME_SCHEMA,
    },
    "crypt-mode": {
        type: CryptMode,
        optional: true,
    },
},
)]
#[derive(Serialize, Deserialize)]
@@ -503,9 +522,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted (or empty if we do not have that info)
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if="Option::is_none")]
    pub encrypted: Option<bool>,
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if="Option::is_none")]
    pub size: Option<u64>,
@@ -540,6 +559,8 @@ pub struct GarbageCollectionStatus {
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
}

impl Default for GarbageCollectionStatus {
@@ -554,6 +575,7 @@ impl Default for GarbageCollectionStatus {
            removed_chunks: 0,
            pending_bytes: 0,
            pending_chunks: 0,
            removed_bad: 0,
        }
    }
}
@@ -573,7 +595,8 @@ pub struct StorageStatus {

#[api(
    properties: {
        "upid": { schema: UPID_SCHEMA },
        upid: { schema: UPID_SCHEMA },
        user: { type: Userid },
    },
)]
#[derive(Serialize, Deserialize)]
@@ -593,7 +616,7 @@ pub struct TaskListItem {
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The user who started the task
    pub user: String,
    pub user: Userid,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if="Option::is_none")]
    pub endtime: Option<i64>,
@@ -606,7 +629,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
    fn from(info: crate::server::TaskListInfo) -> Self {
        let (endtime, status) = info
            .state
            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));

        TaskListItem {
            upid: info.upid_str,
@@ -616,7 +639,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
            starttime: info.upid.starttime,
            worker_type: info.upid.worker_type,
            worker_id: info.upid.worker_id,
            user: info.upid.username,
            user: info.upid.userid,
            endtime,
            status,
        }
@@ -882,9 +905,6 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {

#[test]
fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {

    let schema = PROXMOX_USER_ID_SCHEMA;

    let invalid_user_ids = [
        "x", // too short
        "xx", // too short
@@ -898,7 +918,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
    ];

    for name in invalid_user_ids.iter() {
        if let Ok(_) = parse_simple_value(name, &schema) {
        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
            bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
        }
    }
@@ -912,7 +932,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
    ];

    for name in valid_user_ids.iter() {
        let v = match parse_simple_value(name, &schema) {
        let v = match parse_simple_value(name, &Userid::API_SCHEMA) {
            Ok(v) => v,
            Err(err) => {
                bail!("unable to parse userid '{}' - {}", name, err);
@@ -954,3 +974,30 @@ pub enum RRDTimeFrameResolution {
    /// 1 week => last 490 days
    Year = 60*10080,
}
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "PascalCase")]
|
||||
/// Describes a package for which an update is available.
|
||||
pub struct APTUpdateInfo {
|
||||
/// Package name
|
||||
pub package: String,
|
||||
/// Package title
|
||||
pub title: String,
|
||||
/// Package architecture
|
||||
pub arch: String,
|
||||
/// Human readable package description
|
||||
pub description: String,
|
||||
/// New version to be updated to
|
||||
pub version: String,
|
||||
/// Old version currently installed
|
||||
pub old_version: String,
|
||||
/// Package origin
|
||||
pub origin: String,
|
||||
/// Package priority in human-readable form
|
||||
pub priority: String,
|
||||
/// Package section
|
||||
pub section: String,
|
||||
/// URL under which the package's changelog can be retrieved
|
||||
pub change_log_url: String,
|
||||
}
|
src/api2/types/userid.rs (new file, 420 lines)
@@ -0,0 +1,420 @@
//! Types for user handling.
//!
//! We have [`Username`]s and [`Realm`]s. To uniquely identify a user, they must be combined into a [`Userid`].
//!
//! Since they're all string types, they're organized as follows:
//!
//! * [`Username`]: an owned user name. Internally a `String`.
//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs
//!   with `String`, meaning you can only make references to it.
//! * [`Realm`]: an owned realm (`String` equivalent).
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`). Note that this does not have a separate
//!   borrowed type.
//!
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
//! compared directly. If a direct comparison is really required, they can be compared as strings
//! via the `as_str()` method. [`Realm`]s and [`Userid`]s on the other hand can be compared with
//! each other, as in those two cases the comparison has meaning.

use std::borrow::Borrow;
use std::convert::TryFrom;
use std::fmt;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};

use proxmox::api::api;
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;

// we only allow a limited set of characters
// colon is not allowed, because we store usernames in
// colon separated lists!
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }

const_regex! {
    pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
    pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
    pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
}

pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);

pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);

pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);

pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
    .format(&PROXMOX_GROUP_ID_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();

pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
    StringSchema::new("Authentication domain ID")
        .format(&super::PROXMOX_SAFE_ID_FORMAT)
        .min_length(3)
        .max_length(32);
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();


#[api(
    type: String,
    format: &PROXMOX_USER_NAME_FORMAT,
)]
/// The user name part of a user id.
///
/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order
/// to compare user names directly, they need to be explicitly compared as strings by calling
/// `.as_str()`.
///
/// ```compile_fail
/// fn test(a: Username, b: Username) -> bool {
///     a == b // illegal and does not compile
/// }
/// ```
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
pub struct Username(String);

/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user.
///
/// This is like a `str` to the `String` of a [`Username`].
#[derive(Debug, Hash)]
pub struct UsernameRef(str);

#[doc(hidden)]
/// ```compile_fail
/// let a: Username = unsafe { std::mem::zeroed() };
/// let b: Username = unsafe { std::mem::zeroed() };
/// let _ = <Username as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
/// ```
struct _AssertNoEqImpl;

impl UsernameRef {
    fn new(s: &str) -> &Self {
        unsafe { &*(s as *const str as *const UsernameRef) }
    }

    pub fn as_str(&self) -> &str {
        &self.0
    }
}

impl std::ops::Deref for Username {
    type Target = UsernameRef;

    fn deref(&self) -> &UsernameRef {
        self.borrow()
    }
}

impl Borrow<UsernameRef> for Username {
    fn borrow(&self) -> &UsernameRef {
        UsernameRef::new(self.as_str())
    }
}

impl AsRef<UsernameRef> for Username {
    fn as_ref(&self) -> &UsernameRef {
        UsernameRef::new(self.as_str())
    }
}

impl ToOwned for UsernameRef {
    type Owned = Username;

    fn to_owned(&self) -> Self::Owned {
        Username(self.0.to_owned())
    }
}

impl TryFrom<String> for Username {
    type Error = Error;

    fn try_from(s: String) -> Result<Self, Error> {
        if !PROXMOX_USER_NAME_REGEX.is_match(&s) {
            bail!("invalid user name");
        }

        Ok(Self(s))
    }
}

impl<'a> TryFrom<&'a str> for &'a UsernameRef {
    type Error = Error;

    fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> {
        if !PROXMOX_USER_NAME_REGEX.is_match(s) {
            bail!("invalid name in user id");
        }

        Ok(UsernameRef::new(s))
    }
}

#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)]
/// An authentication realm.
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub struct Realm(String);

/// A reference to an authentication realm.
///
/// This is like a `str` to the `String` of a `Realm`.
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct RealmRef(str);

impl RealmRef {
    fn new(s: &str) -> &Self {
        unsafe { &*(s as *const str as *const RealmRef) }
    }

    pub fn as_str(&self) -> &str {
        &self.0
    }
}

impl std::ops::Deref for Realm {
    type Target = RealmRef;

    fn deref(&self) -> &RealmRef {
        self.borrow()
    }
}

impl Borrow<RealmRef> for Realm {
    fn borrow(&self) -> &RealmRef {
        RealmRef::new(self.as_str())
    }
}

impl AsRef<RealmRef> for Realm {
    fn as_ref(&self) -> &RealmRef {
        RealmRef::new(self.as_str())
    }
}

impl ToOwned for RealmRef {
    type Owned = Realm;

    fn to_owned(&self) -> Self::Owned {
        Realm(self.0.to_owned())
    }
}

impl TryFrom<String> for Realm {
    type Error = Error;

    fn try_from(s: String) -> Result<Self, Error> {
        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&s)
            .map_err(|_| format_err!("invalid realm"))?;

        Ok(Self(s))
    }
}

impl<'a> TryFrom<&'a str> for &'a RealmRef {
    type Error = Error;

    fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(s)
            .map_err(|_| format_err!("invalid realm"))?;

        Ok(RealmRef::new(s))
    }
}

impl PartialEq<str> for Realm {
    fn eq(&self, rhs: &str) -> bool {
        self.0 == rhs
    }
}

impl PartialEq<&str> for Realm {
    fn eq(&self, rhs: &&str) -> bool {
        self.0 == *rhs
    }
}

impl PartialEq<str> for RealmRef {
    fn eq(&self, rhs: &str) -> bool {
        self.0 == *rhs
    }
}

impl PartialEq<&str> for RealmRef {
    fn eq(&self, rhs: &&str) -> bool {
        self.0 == **rhs
    }
}

impl PartialEq<RealmRef> for Realm {
    fn eq(&self, rhs: &RealmRef) -> bool {
        self.0 == &rhs.0
    }
}

impl PartialEq<Realm> for RealmRef {
    fn eq(&self, rhs: &Realm) -> bool {
        self.0 == rhs.0
    }
}

impl PartialEq<Realm> for &RealmRef {
    fn eq(&self, rhs: &Realm) -> bool {
        (*self).0 == rhs.0
    }
}

/// A complete user id consisting of a user name and a realm.
#[derive(Clone, Debug, Hash)]
pub struct Userid {
    data: String,
    name_len: usize,
    //name: Username,
    //realm: Realm,
}

impl Userid {
    pub const API_SCHEMA: Schema = StringSchema::new("User ID")
        .format(&PROXMOX_USER_ID_FORMAT)
        .min_length(3)
        .max_length(64)
        .schema();

    const fn new(data: String, name_len: usize) -> Self {
        Self { data, name_len }
    }

    pub fn name(&self) -> &UsernameRef {
        UsernameRef::new(&self.data[..self.name_len])
    }

    pub fn realm(&self) -> &RealmRef {
        RealmRef::new(&self.data[(self.name_len + 1)..])
    }

    pub fn as_str(&self) -> &str {
        &self.data
    }

    /// Get the "backup@pam" user id.
    pub fn backup_userid() -> &'static Self {
        &*BACKUP_USERID
    }

    /// Get the "root@pam" user id.
    pub fn root_userid() -> &'static Self {
        &*ROOT_USERID
    }
}

lazy_static! {
    pub static ref BACKUP_USERID: Userid = Userid::new("backup@pam".to_string(), 6);
    pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
}

impl Eq for Userid {}

impl PartialEq for Userid {
    fn eq(&self, rhs: &Self) -> bool {
        self.data == rhs.data && self.name_len == rhs.name_len
    }
}

impl From<(Username, Realm)> for Userid {
    fn from(parts: (Username, Realm)) -> Self {
        Self::from((parts.0.as_ref(), parts.1.as_ref()))
    }
}

impl From<(&UsernameRef, &RealmRef)> for Userid {
    fn from(parts: (&UsernameRef, &RealmRef)) -> Self {
        let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str());
        let name_len = parts.0.as_str().len();
        Self { data, name_len }
    }
}

impl fmt::Display for Userid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.data.fmt(f)
    }
}

impl std::str::FromStr for Userid {
    type Err = Error;

    fn from_str(id: &str) -> Result<Self, Error> {
        let (name, realm) = match id.as_bytes().iter().rposition(|&b| b == b'@') {
            Some(pos) => (&id[..pos], &id[(pos + 1)..]),
            None => bail!("not a valid user id"),
        };

        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(realm)
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
    }
}

impl TryFrom<String> for Userid {
    type Error = Error;

    fn try_from(data: String) -> Result<Self, Error> {
        let name_len = data
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        PROXMOX_AUTH_REALM_STRING_SCHEMA.check_constraints(&data[(name_len + 1)..])
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self { data, name_len })
    }
}

impl PartialEq<str> for Userid {
    fn eq(&self, rhs: &str) -> bool {
        rhs.len() > self.name_len + 2 // make sure range access below is allowed
            && rhs.starts_with(self.name().as_str())
            && rhs.as_bytes()[self.name_len] == b'@'
            && &rhs[(self.name_len + 1)..] == self.realm().as_str()
    }
}

impl PartialEq<&str> for Userid {
    fn eq(&self, rhs: &&str) -> bool {
        *self == **rhs
    }
}

impl PartialEq<String> for Userid {
    fn eq(&self, rhs: &String) -> bool {
        self == rhs.as_str()
    }
}

proxmox::forward_deserialize_to_from_str!(Userid);
proxmox::forward_serialize_to_display!(Userid);
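A short usage sketch for these types (illustrative only; `john@pbs` is a made-up user id):

use std::str::FromStr;

fn demo() -> Result<(), anyhow::Error> {
    let userid = Userid::from_str("john@pbs")?;
    assert_eq!(userid.name().as_str(), "john"); // &UsernameRef
    assert_eq!(userid.realm(), "pbs");          // RealmRef implements PartialEq<str>
    assert!(userid == "john@pbs");              // Userid implements PartialEq<&str>
    // comparing two Usernames directly would not compile: Username has no Eq impl
    Ok(())
}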
src/auth.rs (67 lines changed)
@@ -10,39 +10,54 @@ use base64;
  use anyhow::{bail, format_err, Error};
  use serde_json::json;

+ use crate::api2::types::{Userid, UsernameRef, RealmRef};
+
  pub trait ProxmoxAuthenticator {
-     fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error>;
-     fn store_password(&self, username: &str, password: &str) -> Result<(), Error>;
+     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
+     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
  }

  pub struct PAM();

  impl ProxmoxAuthenticator for PAM {

-     fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
+     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
          let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
-         auth.get_handler().set_credentials(username, password);
+         auth.get_handler().set_credentials(username.as_str(), password);
          auth.authenticate()?;
          return Ok(());

      }

-     fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
+     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
          let mut child = Command::new("passwd")
-             .arg(username)
+             .arg(username.as_str())
              .stdin(Stdio::piped())
              .stderr(Stdio::piped())
              .spawn()
-             .or_else(|err| Err(format_err!("unable to set password for '{}' - execute passwd failed: {}", username, err)))?;
+             .map_err(|err| format_err!(
+                 "unable to set password for '{}' - execute passwd failed: {}",
+                 username.as_str(),
+                 err,
+             ))?;

          // Note: passwd reads password twice from stdin (for verify)
          writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;

-         let output = child.wait_with_output()
-             .or_else(|err| Err(format_err!("unable to set password for '{}' - wait failed: {}", username, err)))?;
+         let output = child
+             .wait_with_output()
+             .map_err(|err| format_err!(
+                 "unable to set password for '{}' - wait failed: {}",
+                 username.as_str(),
+                 err,
+             ))?;

          if !output.status.success() {
-             bail!("unable to set password for '{}' - {}", username, String::from_utf8_lossy(&output.stderr));
+             bail!(
+                 "unable to set password for '{}' - {}",
+                 username.as_str(),
+                 String::from_utf8_lossy(&output.stderr),
+             );
          }

          Ok(())
@@ -90,23 +105,23 @@ pub fn verify_crypt_pw(password: &str, enc_password: &str) -> Result<(), Error>
      Ok(())
  }

- const SHADOW_CONFIG_FILENAME: &str = "/etc/proxmox-backup/shadow.json";
+ const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");

  impl ProxmoxAuthenticator for PBS {

-     fn authenticate_user(&self, username: &str, password: &str) -> Result<(), Error> {
+     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
          let data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
-         match data[username].as_str() {
+         match data[username.as_str()].as_str() {
              None => bail!("no password set"),
              Some(enc_password) => verify_crypt_pw(password, enc_password)?,
          }
          Ok(())
      }

-     fn store_password(&self, username: &str, password: &str) -> Result<(), Error> {
+     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
          let enc_password = encrypt_pw(password)?;
          let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
-         data[username] = enc_password.into();
+         data[username.as_str()] = enc_password.into();

          let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
          let options = proxmox::tools::fs::CreateOptions::new()
@@ -121,28 +136,18 @@ impl ProxmoxAuthenticator for PBS {
      }
  }

- pub fn parse_userid(userid: &str) -> Result<(String, String), Error> {
-     let data: Vec<&str> = userid.rsplitn(2, '@').collect();
-
-     if data.len() != 2 {
-         bail!("userid '{}' has no realm", userid);
-     }
-     Ok((data[1].to_owned(), data[0].to_owned()))
- }
-
  /// Lookup the autenticator for the specified realm
- pub fn lookup_authenticator(realm: &str) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
-     match realm {
+ pub fn lookup_authenticator(realm: &RealmRef) -> Result<Box<dyn ProxmoxAuthenticator>, Error> {
+     match realm.as_str() {
          "pam" => Ok(Box::new(PAM())),
          "pbs" => Ok(Box::new(PBS())),
-         _ => bail!("unknown realm '{}'", realm),
+         _ => bail!("unknown realm '{}'", realm.as_str()),
      }
  }

  /// Authenticate users
- pub fn authenticate_user(userid: &str, password: &str) -> Result<(), Error> {
-     let (username, realm) = parse_userid(userid)?;
+ pub fn authenticate_user(userid: &Userid, password: &str) -> Result<(), Error> {

-     lookup_authenticator(&realm)?
-         .authenticate_user(&username, password)
+     lookup_authenticator(userid.realm())?
+         .authenticate_user(userid.name(), password)
  }
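End-to-end, authentication now flows through `Userid`; a hedged sketch of the call path (error handling elided):

fn login(id: &str, password: &str) -> Result<(), anyhow::Error> {
    let userid: Userid = id.parse()?;    // validates user name and realm
    authenticate_user(&userid, password) // dispatches to PAM or the shadow.json backend
}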
@@ -10,16 +10,17 @@ use std::path::PathBuf;
  use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
  use proxmox::try_block;

+ use crate::api2::types::Userid;
  use crate::tools::epoch_now_u64;

  fn compute_csrf_secret_digest(
      timestamp: i64,
      secret: &[u8],
-     username: &str,
+     userid: &Userid,
  ) -> String {

      let mut hasher = sha::Sha256::new();
-     let data = format!("{:08X}:{}:", timestamp, username);
+     let data = format!("{:08X}:{}:", timestamp, userid);
      hasher.update(data.as_bytes());
      hasher.update(secret);

@@ -28,19 +29,19 @@ fn compute_csrf_secret_digest(

  pub fn assemble_csrf_prevention_token(
      secret: &[u8],
-     username: &str,
+     userid: &Userid,
  ) -> String {

      let epoch = epoch_now_u64().unwrap() as i64;

-     let digest = compute_csrf_secret_digest(epoch, secret, username);
+     let digest = compute_csrf_secret_digest(epoch, secret, userid);

      format!("{:08X}:{}", epoch, digest)
  }

  pub fn verify_csrf_prevention_token(
      secret: &[u8],
-     username: &str,
+     userid: &Userid,
      token: &str,
      min_age: i64,
      max_age: i64,
@@ -62,7 +63,7 @@ pub fn verify_csrf_prevention_token(
      let ttime = i64::from_str_radix(timestamp, 16).
          map_err(|err| format_err!("timestamp format error - {}", err))?;

-     let digest = compute_csrf_secret_digest(ttime, secret, username);
+     let digest = compute_csrf_secret_digest(ttime, secret, userid);

      if digest != sig {
          bail!("invalid signature.");
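The token layout, then, is a hex timestamp plus a digest over `{timestamp:08X}:{userid}:` and the server secret; verification recomputes the digest from the embedded timestamp. A sketch under that assumption (the digest's output encoding is not visible in these hunks, so base64 here is an assumption):

fn assemble_token(secret: &[u8], userid: &str, epoch: i64) -> String {
    // digest = SHA-256 over "<hex timestamp>:<userid>:" followed by the secret
    let mut hasher = openssl::sha::Sha256::new();
    hasher.update(format!("{:08X}:{}:", epoch, userid).as_bytes());
    hasher.update(secret);
    let digest = base64::encode(&hasher.finish());
    format!("{:08X}:{}", epoch, digest)
}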
@@ -40,21 +40,21 @@
  //!
  //! Acquire shared lock for ChunkStore (process wide).
  //!
- //! Note: When creating .idx files, we create temporary (.tmp) file,
+ //! Note: When creating .idx files, we create a temporary (.tmp) file,
  //! then do an atomic rename ...
  //!
  //!
  //! * Garbage Collect:
  //!
  //! Acquire exclusive lock for ChunkStore (process wide). If we have
- //! already an shared lock for ChunkStore, try to updraged that
+ //! already a shared lock for the ChunkStore, try to upgrade that
  //! lock.
  //!
  //!
  //! * Server Restart
  //!
- //! Try to abort running garbage collection to release exclusive
- //! ChunkStore lock asap. Start new service with existing listening
+ //! Try to abort the running garbage collection to release exclusive
+ //! ChunkStore locks ASAP. Start the new service with the existing listening
  //! socket.
  //!
  //!
@@ -62,10 +62,10 @@
  //!
  //! Deleting backups is as easy as deleting the corresponding .idx
  //! files. Unfortunately, this does not free up any storage, because
- //! those files just contains references to chunks.
+ //! those files just contain references to chunks.
  //!
  //! To free up some storage, we run a garbage collection process at
- //! regular intervals. The collector uses an mark and sweep
+ //! regular intervals. The collector uses a mark and sweep
  //! approach. In the first phase, it scans all .idx files to mark used
  //! chunks. The second phase then removes all unmarked chunks from the
  //! store.
@@ -90,12 +90,12 @@
  //! amount of time ago (by default 24h). So we may only delete chunks
  //! with `atime` older than 24 hours.
  //!
- //! Another problem arise from running backups. The mark phase does
+ //! Another problem arises from running backups. The mark phase does
  //! not find any chunks from those backups, because there is no .idx
  //! file for them (created after the backup). Chunks created or
  //! touched by those backups may have an `atime` as old as the start
- //! time of those backup. Please not that the backup start time may
- //! predate the GC start time. Se we may only delete chunk older than
+ //! time of those backups. Please note that the backup start time may
+ //! predate the GC start time. So we may only delete chunks older than
  //! the start time of those running backup jobs.
  //!
  //!
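The cutoff rule described above shows up again in `sweep_unused_chunks()` further down; as a standalone sketch (values are epoch seconds):

// a chunk may only be swept if its atime is older than both limits
fn min_atime(phase1_start_time: i64, oldest_writer: i64) -> i64 {
    // at least 24h in the past, to stay safe with the `relatime` mount option
    let mut min_atime = phase1_start_time - 3600 * 24;
    if oldest_writer < min_atime {
        min_atime = oldest_writer; // running backups may touch older chunks
    }
    min_atime
}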
@@ -120,6 +120,8 @@ macro_rules! PROXMOX_BACKUP_READER_PROTOCOL_ID_V1 {

  /// Unix system user used by proxmox-backup-proxy
  pub const BACKUP_USER_NAME: &str = "backup";
+ /// Unix system group used by proxmox-backup-proxy
+ pub const BACKUP_GROUP_NAME: &str = "backup";

  /// Return User info for the 'backup' user (``getpwnam_r(3)``)
  pub fn backup_user() -> Result<nix::unistd::User, Error> {
@@ -129,6 +131,14 @@ pub fn backup_user() -> Result<nix::unistd::User, Error> {
      }
  }

+ /// Return Group info for the 'backup' group (``getgrnam(3)``)
+ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
+     match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
+         Some(group) => Ok(group),
+         None => bail!("Unable to lookup backup user."),
+     }
+ }
+
  mod file_formats;
  pub use file_formats::*;
@@ -1,30 +1,35 @@
  use std::future::Future;
  use std::task::{Poll, Context};
  use std::pin::Pin;
+ use std::io::SeekFrom;

  use anyhow::Error;
  use futures::future::FutureExt;
  use futures::ready;
- use tokio::io::AsyncRead;
+ use tokio::io::{AsyncRead, AsyncSeek};

  use proxmox::sys::error::io_err_other;
  use proxmox::io_format_err;

  use super::IndexFile;
  use super::read_chunk::AsyncReadChunk;
+ use super::index::ChunkReadInfo;

  enum AsyncIndexReaderState<S> {
      NoData,
      WaitForData(Pin<Box<dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static>>),
-     HaveData(usize),
+     HaveData,
  }

  pub struct AsyncIndexReader<S, I: IndexFile> {
      store: Option<S>,
      index: I,
      read_buffer: Vec<u8>,
+     current_chunk_offset: u64,
      current_chunk_idx: usize,
-     current_chunk_digest: [u8; 32],
+     current_chunk_info: Option<ChunkReadInfo>,
      position: u64,
+     seek_to_pos: i64,
      state: AsyncIndexReaderState<S>,
  }

@@ -37,8 +42,11 @@ impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
              store: Some(store),
              index,
              read_buffer: Vec::with_capacity(1024 * 1024),
+             current_chunk_offset: 0,
              current_chunk_idx: 0,
-             current_chunk_digest: [0u8; 32],
+             current_chunk_info: None,
              position: 0,
+             seek_to_pos: 0,
              state: AsyncIndexReaderState::NoData,
          }
      }
@@ -58,23 +66,41 @@ where
          loop {
              match &mut this.state {
                  AsyncIndexReaderState::NoData => {
-                     if this.current_chunk_idx >= this.index.index_count() {
+                     let (idx, offset) = if this.current_chunk_info.is_some() &&
+                         this.position == this.current_chunk_info.as_ref().unwrap().range.end
+                     {
+                         // optimization for sequential chunk read
+                         let next_idx = this.current_chunk_idx + 1;
+                         (next_idx, 0)
+                     } else {
+                         match this.index.chunk_from_offset(this.position) {
+                             Some(res) => res,
+                             None => return Poll::Ready(Ok(0))
+                         }
+                     };
+
+                     if idx >= this.index.index_count() {
                          return Poll::Ready(Ok(0));
                      }

-                     let digest = this
+                     let info = this
                          .index
-                         .index_digest(this.current_chunk_idx)
-                         .ok_or(io_format_err!("could not get digest"))?
-                         .clone();
+                         .chunk_info(idx)
+                         .ok_or(io_format_err!("could not get digest"))?;

-                     if digest == this.current_chunk_digest {
-                         this.state = AsyncIndexReaderState::HaveData(0);
-                         continue;
+                     this.current_chunk_offset = offset;
+                     this.current_chunk_idx = idx;
+                     let old_info = this.current_chunk_info.replace(info.clone());
+
+                     if let Some(old_info) = old_info {
+                         if old_info.digest == info.digest {
+                             // hit, chunk is currently in cache
+                             this.state = AsyncIndexReaderState::HaveData;
+                             continue;
+                         }
                      }

-                     this.current_chunk_digest = digest;
-
+                     // miss, need to download new chunk
                      let store = match this.store.take() {
                          Some(store) => store,
                          None => {
@@ -83,7 +109,7 @@ where
                      };

                      let future = async move {
-                         store.read_chunk(&digest)
+                         store.read_chunk(&info.digest)
                              .await
                              .map(move |x| (store, x))
                      };
@@ -95,7 +121,7 @@ where
                  Ok((store, mut chunk_data)) => {
                      this.read_buffer.clear();
                      this.read_buffer.append(&mut chunk_data);
-                     this.state = AsyncIndexReaderState::HaveData(0);
+                     this.state = AsyncIndexReaderState::HaveData;
                      this.store = Some(store);
                  }
                  Err(err) => {
@@ -103,8 +129,8 @@ where
                  }
              };
          }
-             AsyncIndexReaderState::HaveData(offset) => {
-                 let offset = *offset;
+             AsyncIndexReaderState::HaveData => {
+                 let offset = this.current_chunk_offset as usize;
                  let len = this.read_buffer.len();
                  let n = if len - offset < buf.len() {
                      len - offset
@@ -113,11 +139,13 @@ where
                  };

                  buf[0..n].copy_from_slice(&this.read_buffer[offset..(offset + n)]);
+                 this.position += n as u64;
+
                  if offset + n == len {
                      this.state = AsyncIndexReaderState::NoData;
                      this.current_chunk_idx += 1;
                  } else {
-                     this.state = AsyncIndexReaderState::HaveData(offset + n);
+                     this.current_chunk_offset += n as u64;
+                     this.state = AsyncIndexReaderState::HaveData;
                  }

                  return Poll::Ready(Ok(n));
@@ -126,3 +154,51 @@ where
          }
      }
  }
+
+ impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
+ where
+     S: AsyncReadChunk + Unpin + Sync + 'static,
+     I: IndexFile + Unpin,
+ {
+     fn start_seek(
+         self: Pin<&mut Self>,
+         _cx: &mut Context<'_>,
+         pos: SeekFrom,
+     ) -> Poll<tokio::io::Result<()>> {
+         let this = Pin::get_mut(self);
+         this.seek_to_pos = match pos {
+             SeekFrom::Start(offset) => {
+                 offset as i64
+             },
+             SeekFrom::End(offset) => {
+                 this.index.index_bytes() as i64 + offset
+             },
+             SeekFrom::Current(offset) => {
+                 this.position as i64 + offset
+             }
+         };
+         Poll::Ready(Ok(()))
+     }
+
+     fn poll_complete(
+         self: Pin<&mut Self>,
+         _cx: &mut Context<'_>,
+     ) -> Poll<tokio::io::Result<u64>> {
+         let this = Pin::get_mut(self);
+
+         let index_bytes = this.index.index_bytes();
+         if this.seek_to_pos < 0 {
+             return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
+         } else if this.seek_to_pos > index_bytes as i64 {
+             this.position = index_bytes;
+         } else {
+             this.position = this.seek_to_pos as u64;
+         }
+
+         // even if seeking within one chunk, we need to go to NoData to
+         // recalculate the current_chunk_offset (data is cached anyway)
+         this.state = AsyncIndexReaderState::NoData;
+
+         Poll::Ready(Ok(this.position))
+     }
+ }
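With `AsyncSeek` in place the reader can be driven through tokio's extension traits; a hedged usage sketch (generic bounds copied from the impls above, tokio extension-trait API assumed):

use tokio::io::{AsyncReadExt, AsyncSeekExt};

async fn read_at<S, I>(reader: &mut AsyncIndexReader<S, I>, offset: u64, len: usize)
    -> tokio::io::Result<Vec<u8>>
where
    S: AsyncReadChunk + Unpin + Sync + 'static,
    I: IndexFile + Unpin,
{
    reader.seek(std::io::SeekFrom::Start(offset)).await?; // start_seek + poll_complete
    let mut buf = vec![0u8; len];
    let n = reader.read(&mut buf).await?;                 // the poll_read path above
    buf.truncate(n);
    Ok(buf)
}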
@@ -2,9 +2,10 @@ use crate::tools;

  use anyhow::{bail, format_err, Error};
  use regex::Regex;
+ use std::convert::TryFrom;
  use std::os::unix::io::RawFd;

- use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
+ use chrono::{DateTime, LocalResult, TimeZone, SecondsFormat, Utc};

  use std::path::{PathBuf, Path};
  use lazy_static::lazy_static;
@@ -45,6 +46,31 @@ pub struct BackupGroup {
      backup_id: String,
  }

+ impl std::cmp::Ord for BackupGroup {
+
+     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+         let type_order = self.backup_type.cmp(&other.backup_type);
+         if type_order != std::cmp::Ordering::Equal {
+             return type_order;
+         }
+         // try to compare IDs numerically
+         let id_self = self.backup_id.parse::<u64>();
+         let id_other = other.backup_id.parse::<u64>();
+         match (id_self, id_other) {
+             (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+             (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+             (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+             _ => self.backup_id.cmp(&other.backup_id),
+         }
+     }
+ }
+
+ impl std::cmp::PartialOrd for BackupGroup {
+     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+         Some(self.cmp(other))
+     }
+ }
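The effect of the numeric-first comparison, as a standalone sketch:

fn id_cmp(a: &str, b: &str) -> std::cmp::Ordering {
    match (a.parse::<u64>(), b.parse::<u64>()) {
        (Ok(a), Ok(b)) => a.cmp(&b),
        (Ok(_), Err(_)) => std::cmp::Ordering::Less,
        (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
        _ => a.cmp(b),
    }
}

fn main() {
    let mut ids = vec!["110", "9", "vm-a", "20"];
    ids.sort_by(|a, b| id_cmp(a, b));
    assert_eq!(ids, ["9", "20", "110", "vm-a"]); // "110" no longer sorts before "9"
}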
  impl BackupGroup {

      pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
@@ -81,7 +107,7 @@ impl BackupGroup {
                  if file_type != nix::dir::Type::Directory { return Ok(()); }

                  let dt = backup_time.parse::<DateTime<Utc>>()?;
-                 let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
+                 let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp())?;
                  let files = list_backup_files(l2_fd, backup_time)?;

                  list.push(BackupInfo { backup_dir, files });
@@ -106,7 +132,11 @@ impl BackupGroup {

                  use nix::fcntl::{openat, OFlag};
                  match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
-                     Ok(_) => { /* manifest exists --> assume backup was successful */ },
+                     Ok(rawfd) => {
+                         /* manifest exists --> assume backup was successful */
+                         /* close else this leaks! */
+                         nix::unistd::close(rawfd)?;
+                     },
                      Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
                      Err(err) => {
                          bail!("last_successful_backup: unexpected error - {}", err);
@@ -169,7 +199,7 @@ impl std::str::FromStr for BackupGroup {
  /// Uniquely identify a Backup (relative to data store)
  ///
  /// We also call this a backup snaphost.
- #[derive(Debug, Clone)]
+ #[derive(Debug, Eq, PartialEq, Clone)]
  pub struct BackupDir {
      /// Backup group
      group: BackupGroup,
@@ -179,19 +209,22 @@ pub struct BackupDir {

  impl BackupDir {

-     pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
+     pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Result<Self, Error>
      where
          T: Into<String>,
          U: Into<String>,
      {
-         // Note: makes sure that nanoseconds is 0
-         Self {
-             group: BackupGroup::new(backup_type.into(), backup_id.into()),
-             backup_time: Utc.timestamp(timestamp, 0),
-         }
+         let group = BackupGroup::new(backup_type.into(), backup_id.into());
+         BackupDir::new_with_group(group, timestamp)
      }
-     pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
-         Self { group, backup_time: Utc.timestamp(timestamp, 0) }

+     pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Result<Self, Error> {
+         let backup_time = match Utc.timestamp_opt(timestamp, 0) {
+             LocalResult::Single(time) => time,
+             _ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
+         };
+
+         Ok(Self { group, backup_time })
      }

      pub fn group(&self) -> &BackupGroup {
@@ -228,7 +261,7 @@ impl std::str::FromStr for BackupDir {

          let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
          let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
-         Ok(BackupDir::from((group, backup_time.timestamp())))
+         BackupDir::try_from((group, backup_time.timestamp()))
      }
  }

@@ -241,9 +274,11 @@ impl std::fmt::Display for BackupDir {
      }
  }

- impl From<(BackupGroup, i64)> for BackupDir {
-     fn from((group, timestamp): (BackupGroup, i64)) -> Self {
-         Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+ impl TryFrom<(BackupGroup, i64)> for BackupDir {
+     type Error = Error;
+
+     fn try_from((group, timestamp): (BackupGroup, i64)) -> Result<Self, Error> {
+         BackupDir::new_with_group(group, timestamp)
      }
  }

@@ -268,9 +303,13 @@ impl BackupInfo {
  }

  /// Finds the latest backup inside a backup group
- pub fn last_backup(base_path: &Path, group: &BackupGroup) -> Result<Option<BackupInfo>, Error> {
+ pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
+     -> Result<Option<BackupInfo>, Error>
+ {
      let backups = group.list_backups(base_path)?;
-     Ok(backups.into_iter().max_by_key(|item| item.backup_dir.backup_time()))
+     Ok(backups.into_iter()
+         .filter(|item| !only_finished || item.is_finished())
+         .max_by_key(|item| item.backup_dir.backup_time()))
  }

  pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
@@ -301,7 +340,7 @@ impl BackupInfo {
          if file_type != nix::dir::Type::Directory { return Ok(()); }

          let dt = backup_time.parse::<DateTime<Utc>>()?;
-         let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
+         let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp())?;

          let files = list_backup_files(l2_fd, backup_time)?;

@@ -313,6 +352,11 @@ impl BackupInfo {
          })?;
          Ok(list)
      }

+     pub fn is_finished(&self) -> bool {
+         // backup is considered unfinished if there is no manifest
+         self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+     }
  }

  fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
@@ -5,7 +5,7 @@ use std::io::{Read, Write, Seek, SeekFrom};
  use std::os::unix::ffi::OsStrExt;

  use anyhow::{bail, format_err, Error};
- use chrono::offset::{TimeZone, Local};
+ use chrono::offset::{TimeZone, Local, LocalResult};

  use pathpatterns::{MatchList, MatchType};
  use proxmox::tools::io::ReadExt;
@@ -533,17 +533,17 @@ impl <R: Read + Seek> CatalogReader<R> {
                  self.dump_dir(&path, pos)?;
              }
              CatalogEntryType::File => {
-                 let dt = Local
-                     .timestamp_opt(mtime as i64, 0)
-                     .single() // chrono docs say timestamp_opt can only be None or Single!
-                     .unwrap_or_else(|| Local.timestamp(0, 0));
+                 let mtime_string = match Local.timestamp_opt(mtime as i64, 0) {
+                     LocalResult::Single(time) => time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
+                     _ => (mtime as i64).to_string(),
+                 };

                  println!(
                      "{} {:?} {} {}",
                      etype,
                      path,
                      size,
-                     dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, false),
+                     mtime_string,
                  );
              }
              _ => {
@@ -3,7 +3,7 @@ use std::ffi::{CStr, CString, OsStr, OsString};
  use std::future::Future;
  use std::io::Write;
  use std::mem;
- use std::os::unix::ffi::OsStrExt;
+ use std::os::unix::ffi::{OsStrExt, OsStringExt};
  use std::path::{Path, PathBuf};
  use std::pin::Pin;

@@ -89,6 +89,10 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
                  "find",
                  CliCommand::new(&API_METHOD_FIND_COMMAND).arg_param(&["pattern"]),
              )
+             .insert(
+                 "exit",
+                 CliCommand::new(&API_METHOD_EXIT),
+             )
              .insert_help(),
      )
  }
@@ -104,6 +108,14 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
      }
  }

+ // just an empty wrapper so that it is displayed in help/docs; we check
+ // in the readloop for 'exit' again and break
+ #[api(input: { properties: {} })]
+ /// Exit the shell
+ async fn exit() -> Result<(), Error> {
+     Ok(())
+ }
+
  #[api(input: { properties: {} })]
  /// List the current working directory.
  async fn pwd_command() -> Result<(), Error> {
@@ -439,6 +451,9 @@ impl Shell {
              SHELL = Some(this as *mut Shell as usize);
          }
          while let Ok(line) = this.rl.readline(&this.prompt) {
+             if line == "exit" {
+                 break;
+             }
              let helper = this.rl.helper().unwrap();
              let args = match cli::shellword_split(&line) {
                  Ok(args) => args,
@@ -1058,6 +1073,7 @@ impl<'a> ExtractorState<'a> {
          }
          self.path.extend(&entry.name);

+         self.extractor.set_path(OsString::from_vec(self.path.clone()));
          self.handle_entry(entry).await?;
      }

@@ -80,8 +80,9 @@ impl ChunkStore {

          let default_options = CreateOptions::new();

-         if let Err(err) = create_path(&base, Some(default_options.clone()), Some(options.clone())) {
-             bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+         match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+             Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+             Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
          }

          if let Err(err) = create_dir(&chunk_dir, options.clone()) {
@@ -103,7 +104,7 @@ impl ChunkStore {
              }
              let percentage = (i*100)/(64*1024);
              if percentage != last_percentage {
-                 eprintln!("Percentage done: {}", percentage);
+                 eprintln!("{}%", percentage);
                  last_percentage = percentage;
              }
          }
@@ -177,32 +178,16 @@ impl ChunkStore {
              return Ok(false);
          }

-         bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
+         bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
      }

      Ok(true)
  }

- pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
-
-     let (chunk_path, digest_str) = self.chunk_path(digest);
-     let mut file = std::fs::File::open(&chunk_path)
-         .map_err(|err| {
-             format_err!(
-                 "store '{}', unable to read chunk '{}' - {}",
-                 self.name,
-                 digest_str,
-                 err,
-             )
-         })?;
-
-     DataBlob::load(&mut file)
- }
-
  pub fn get_chunk_iterator(
      &self,
  ) -> Result<
-     impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator,
+     impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
      Error
  > {
      use nix::dir::Dir;
@@ -234,19 +219,21 @@ impl ChunkStore {
                  Some(Ok(entry)) => {
                      // skip files if they're not a hash
                      let bytes = entry.file_name().to_bytes();
-                     if bytes.len() != 64 {
+                     if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
                          continue;
                      }
-                     if !bytes.iter().all(u8::is_ascii_hexdigit) {
+                     if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
                          continue;
                      }
-                     return Some((Ok(entry), percentage));
+
+                     let bad = bytes.ends_with(".bad".as_bytes());
+                     return Some((Ok(entry), percentage, bad));
                  }
                  Some(Err(err)) => {
                      // stop after first error
                      done = true;
                      // and pass the error through:
-                     return Some((Err(err), percentage));
+                     return Some((Err(err), percentage, false));
                  }
                  None => (), // open next directory
              }
@@ -276,7 +263,7 @@ impl ChunkStore {
                  // other errors are fatal, so end our iteration
                  done = true;
                  // and pass the error through:
-                 return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage));
+                 return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
              }
          }
      }
@@ -290,14 +277,14 @@ impl ChunkStore {
      pub fn sweep_unused_chunks(
          &self,
          oldest_writer: i64,
+         phase1_start_time: i64,
          status: &mut GarbageCollectionStatus,
          worker: &WorkerTask,
      ) -> Result<(), Error> {
          use nix::sys::stat::fstatat;
+         use nix::unistd::{unlinkat, UnlinkatFlags};

-         let now = unsafe { libc::time(std::ptr::null_mut()) };
-
-         let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
+         let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)

          if oldest_writer < min_atime {
              min_atime = oldest_writer;
@@ -308,10 +295,10 @@ impl ChunkStore {
          let mut last_percentage = 0;
          let mut chunk_count = 0;

-         for (entry, percentage) in self.get_chunk_iterator()? {
+         for (entry, percentage, bad) in self.get_chunk_iterator()? {
              if last_percentage != percentage {
                  last_percentage = percentage;
-                 worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
+                 worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
              }

              worker.fail_on_abort()?;
@@ -337,14 +324,47 @@ impl ChunkStore {
              let lock = self.mutex.lock();

              if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
-                 if stat.st_atime < min_atime {
+                 if bad {
+                     // filename validity checked in iterator
+                     let orig_filename = std::ffi::CString::new(&filename.to_bytes()[..64])?;
+                     match fstatat(
+                         dirfd,
+                         orig_filename.as_c_str(),
+                         nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
+                     {
+                         Ok(_) => {
+                             match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+                                 Err(err) =>
+                                     worker.warn(format!(
+                                         "unlinking corrupt chunk {:?} failed on store '{}' - {}",
+                                         filename,
+                                         self.name,
+                                         err,
+                                     )),
+                                 Ok(_) => {
+                                     status.removed_bad += 1;
+                                     status.removed_bytes += stat.st_size as u64;
+                                 }
+                             }
+                         },
+                         Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+                             // chunk hasn't been rewritten yet, keep .bad file
+                         },
+                         Err(err) => {
+                             // some other error, warn user and keep .bad file around too
+                             worker.warn(format!(
+                                 "error during stat on '{:?}' - {}",
+                                 orig_filename,
+                                 err,
+                             ));
+                         }
+                     }
+                 } else if stat.st_atime < min_atime {
                      //let age = now - stat.st_atime;
                      //println!("UNLINK {} {:?}", age/(3600*24), filename);
-                     let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
-                     if res != 0 {
-                         let err = nix::Error::last();
+                     if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
                          bail!(
-                             "unlink chunk {:?} failed on store '{}' - {}",
+                             "unlinking chunk {:?} failed on store '{}' - {}",
                              filename,
                              self.name,
                              err,
@@ -382,6 +402,7 @@ impl ChunkStore {

          if let Ok(metadata) = std::fs::metadata(&chunk_path) {
              if metadata.is_file() {
+                 self.touch_chunk(digest)?;
                  return Ok((true, metadata.len()));
              } else {
                  bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
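The iterator's filename filter above, condensed into a standalone sketch: a valid entry is either a 64-character hex digest, or a digest plus a `.<n>.bad` suffix left behind by verify.

fn is_chunk_name(name: &[u8]) -> bool {
    // 64 hex chars, optionally followed by a ".<n>.bad" marker of fixed length
    (name.len() == 64 || name.len() == 64 + ".0.bad".len())
        && name.iter().take(64).all(u8::is_ascii_hexdigit)
}

fn is_bad_chunk(name: &[u8]) -> bool {
    name.ends_with(b".bad")
}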
@@ -5,15 +5,15 @@
  /// use hash value 0 to detect a boundary.
  const CA_CHUNKER_WINDOW_SIZE: usize = 64;

- /// Slinding window chunker (Buzhash)
+ /// Sliding window chunker (Buzhash)
  ///
  /// This is a rewrite of *casync* chunker (cachunker.h) in rust.
  ///
  /// Hashing by cyclic polynomial (also called Buzhash) has the benefit
  /// of avoiding multiplications, using barrel shifts instead. For more
  /// information please take a look at the [Rolling
- /// Hash](https://en.wikipedia.org/wiki/Rolling_hash) artikel from
- /// wikipedia.
+ /// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from
+ /// Wikipedia.

  pub struct Chunker {
      h: u32,
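For illustration, one step of a cyclic-polynomial (Buzhash) rolling update could look like this (hypothetical 256-entry random `table`; the actual Chunker differs in details):

const WINDOW: usize = 64;

fn roll(h: u32, leaving: u8, entering: u8, table: &[u32; 256]) -> u32 {
    // one barrel shift per step, no multiplications: remove the byte leaving
    // the window (rotated by the window size) and mix in the entering byte
    h.rotate_left(1)
        ^ table[leaving as usize].rotate_left((WINDOW % 32) as u32)
        ^ table[entering as usize]
}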
@@ -6,12 +6,30 @@
  //! See the Wikipedia Artikel for [Authenticated
  //! encryption](https://en.wikipedia.org/wiki/Authenticated_encryption)
  //! for a short introduction.
- use anyhow::{bail, Error};
- use openssl::pkcs5::pbkdf2_hmac;
- use openssl::hash::MessageDigest;
- use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
-
- use std::io::Write;
- use chrono::{Local, TimeZone, DateTime};

+ use anyhow::{bail, Error};
+ use chrono::{Local, DateTime};
+ use openssl::hash::MessageDigest;
+ use openssl::pkcs5::pbkdf2_hmac;
+ use openssl::symm::{decrypt_aead, Cipher, Crypter, Mode};
+ use serde::{Deserialize, Serialize};
+
+ use proxmox::api::api;
+
+ #[api(default: "encrypt")]
+ #[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+ #[serde(rename_all = "kebab-case")]
+ /// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
+ pub enum CryptMode {
+     /// Don't encrypt.
+     None,
+     /// Encrypt.
+     Encrypt,
+     /// Only sign.
+     SignOnly,
+ }

  /// Encryption Configuration with secret key
  ///
@@ -26,7 +44,6 @@ pub struct CryptConfig {
      id_pkey: openssl::pkey::PKey<openssl::pkey::Private>,
      // The private key used by the cipher.
      enc_key: [u8; 32],
-
  }

  impl CryptConfig {
@@ -63,10 +80,9 @@ impl CryptConfig {
      /// chunk digest values do not clash with values computed for
      /// other sectret keys.
      pub fn compute_digest(&self, data: &[u8]) -> [u8; 32] {
-         // FIXME: use HMAC-SHA256 instead??
          let mut hasher = openssl::sha::Sha256::new();
-         hasher.update(&self.id_key);
          hasher.update(data);
+         hasher.update(&self.id_key); // at the end, to avoid length extensions attacks
          hasher.finish()
      }

@@ -203,7 +219,7 @@ impl CryptConfig {
          created: DateTime<Local>,
      ) -> Result<Vec<u8>, Error> {

-         let modified = Local.timestamp(Local::now().timestamp(), 0);
+         let modified = Local::now();
          let key_config = super::KeyConfig { kdf: None, created, modified, data: self.enc_key.to_vec() };
          let data = serde_json::to_string(&key_config)?.as_bytes().to_vec();
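With `rename_all = "kebab-case"`, `CryptMode` crosses the API as `"none"`, `"encrypt"`, or `"sign-only"`; a small round-trip sketch:

fn main() {
    let mode: CryptMode = serde_json::from_str("\"sign-only\"").unwrap();
    assert_eq!(mode, CryptMode::SignOnly);
    assert_eq!(serde_json::to_string(&CryptMode::Encrypt).unwrap(), "\"encrypt\"");
}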
@ -3,10 +3,10 @@ use std::convert::TryInto;
|
||||
|
||||
use proxmox::tools::io::{ReadExt, WriteExt};
|
||||
|
||||
const MAX_BLOB_SIZE: usize = 128*1024*1024;
|
||||
|
||||
use super::file_formats::*;
|
||||
use super::CryptConfig;
|
||||
use super::{CryptConfig, CryptMode};
|
||||
|
||||
const MAX_BLOB_SIZE: usize = 128*1024*1024;
|
||||
|
||||
/// Encoded data chunk with digest and positional information
|
||||
pub struct ChunkInfo {
|
||||
@ -36,6 +36,11 @@ impl DataBlob {
|
||||
&self.raw_data
|
||||
}
|
||||
|
||||
/// Returns raw_data size
|
||||
pub fn raw_size(&self) -> u64 {
|
||||
self.raw_data.len() as u64
|
||||
}
|
||||
|
||||
/// Consume self and returns raw_data
|
||||
pub fn into_inner(self) -> Vec<u8> {
|
||||
self.raw_data
|
||||
@ -66,8 +71,8 @@ impl DataBlob {
|
||||
hasher.finalize()
|
||||
}
|
||||
|
||||
/// verify the CRC32 checksum
|
||||
pub fn verify_crc(&self) -> Result<(), Error> {
|
||||
// verify the CRC32 checksum
|
||||
fn verify_crc(&self) -> Result<(), Error> {
|
||||
let expected_crc = self.compute_crc();
|
||||
if expected_crc != self.crc() {
|
||||
bail!("Data blob has wrong CRC checksum.");
|
||||
@ -166,17 +171,37 @@ impl DataBlob {
|
||||
Ok(blob)
|
||||
}
|
||||
|
||||
/// Get the encryption mode for this blob.
|
||||
pub fn crypt_mode(&self) -> Result<CryptMode, Error> {
|
||||
let magic = self.magic();
|
||||
|
||||
Ok(if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 || magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
CryptMode::None
|
||||
} else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
|
||||
CryptMode::Encrypt
|
||||
} else {
|
||||
bail!("Invalid blob magic number.");
|
||||
})
|
||||
}
|
||||
|
||||
/// Decode blob data
|
||||
pub fn decode(&self, config: Option<&CryptConfig>) -> Result<Vec<u8>, Error> {
|
||||
pub fn decode(&self, config: Option<&CryptConfig>, digest: Option<&[u8; 32]>) -> Result<Vec<u8>, Error> {
|
||||
|
||||
let magic = self.magic();
|
||||
|
||||
if magic == &UNCOMPRESSED_BLOB_MAGIC_1_0 {
|
||||
let data_start = std::mem::size_of::<DataBlobHeader>();
|
||||
Ok(self.raw_data[data_start..].to_vec())
|
||||
let data = self.raw_data[data_start..].to_vec();
|
||||
if let Some(digest) = digest {
|
||||
Self::verify_digest(&data, None, digest)?;
|
||||
}
|
||||
Ok(data)
|
||||
} else if magic == &COMPRESSED_BLOB_MAGIC_1_0 {
|
||||
let data_start = std::mem::size_of::<DataBlobHeader>();
|
||||
let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?;
|
||||
if let Some(digest) = digest {
|
||||
Self::verify_digest(&data, None, digest)?;
|
||||
}
|
||||
Ok(data)
|
||||
} else if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
|
||||
let header_len = std::mem::size_of::<EncryptedDataBlobHeader>();
|
||||
@ -190,86 +215,29 @@ impl DataBlob {
|
||||
} else {
|
||||
config.decode_uncompressed_chunk(&self.raw_data[header_len..], &head.iv, &head.tag)?
|
||||
};
|
||||
if let Some(digest) = digest {
|
||||
Self::verify_digest(&data, Some(config), digest)?;
|
||||
}
|
||||
Ok(data)
|
||||
} else {
|
||||
bail!("unable to decrypt blob - missing CryptConfig");
|
||||
}
|
||||
} else if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 || magic == &AUTHENTICATED_BLOB_MAGIC_1_0 {
|
||||
let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
let head = unsafe {
|
||||
(&self.raw_data[..header_len]).read_le_value::<AuthenticatedDataBlobHeader>()?
|
||||
};
|
||||
|
||||
let data_start = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
|
||||
// Note: only verify if we have a crypt config
|
||||
if let Some(config) = config {
|
||||
let signature = config.compute_auth_tag(&self.raw_data[data_start..]);
|
||||
if signature != head.tag {
|
||||
bail!("verifying blob signature failed");
|
||||
}
|
||||
}
|
||||
|
||||
if magic == &AUTH_COMPR_BLOB_MAGIC_1_0 {
|
||||
let data = zstd::block::decompress(&self.raw_data[data_start..], 16*1024*1024)?;
|
||||
Ok(data)
|
||||
} else {
|
||||
Ok(self.raw_data[data_start..].to_vec())
|
||||
}
|
||||
} else {
|
||||
bail!("Invalid blob magic number.");
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a signed DataBlob, optionally compressed
|
||||
pub fn create_signed(
|
||||
data: &[u8],
|
||||
config: &CryptConfig,
|
||||
compress: bool,
|
||||
) -> Result<Self, Error> {
|
||||
|
||||
if data.len() > MAX_BLOB_SIZE {
|
||||
bail!("data blob too large ({} bytes).", data.len());
|
||||
}
|
||||
|
||||
let compr_data;
|
||||
let (_compress, data, magic) = if compress {
|
||||
compr_data = zstd::block::compress(data, 1)?;
|
||||
// Note: We only use compression if result is shorter
|
||||
if compr_data.len() < data.len() {
|
||||
(true, &compr_data[..], AUTH_COMPR_BLOB_MAGIC_1_0)
|
||||
} else {
|
||||
(false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
|
||||
}
|
||||
} else {
|
||||
(false, data, AUTHENTICATED_BLOB_MAGIC_1_0)
|
||||
};
|
||||
|
||||
let header_len = std::mem::size_of::<AuthenticatedDataBlobHeader>();
|
||||
let mut raw_data = Vec::with_capacity(data.len() + header_len);
|
||||
|
||||
let head = AuthenticatedDataBlobHeader {
|
||||
head: DataBlobHeader { magic, crc: [0; 4] },
|
||||
tag: config.compute_auth_tag(data),
|
||||
};
|
||||
unsafe {
|
||||
raw_data.write_le_value(head)?;
|
||||
}
|
||||
raw_data.extend_from_slice(data);
|
||||
|
||||
let mut blob = DataBlob { raw_data };
|
||||
blob.set_crc(blob.compute_crc());
|
||||
|
||||
Ok(blob)
|
||||
}

/// Load blob from ``reader``
pub fn load(reader: &mut dyn std::io::Read) -> Result<Self, Error> {
/// Load blob from ``reader``, verify CRC
pub fn load_from_reader(reader: &mut dyn std::io::Read) -> Result<Self, Error> {

let mut data = Vec::with_capacity(1024*1024);
reader.read_to_end(&mut data)?;

Self::from_raw(data)
let blob = Self::from_raw(data)?;

blob.verify_crc()?;

Ok(blob)
}

/// Create Instance from raw data
@@ -294,14 +262,6 @@ impl DataBlob {

let blob = DataBlob { raw_data: data };

Ok(blob)
} else if magic == AUTH_COMPR_BLOB_MAGIC_1_0 || magic == AUTHENTICATED_BLOB_MAGIC_1_0 {
if data.len() < std::mem::size_of::<AuthenticatedDataBlobHeader>() {
bail!("authenticated blob too small ({} bytes).", data.len());
}

let blob = DataBlob { raw_data: data };

Ok(blob)
} else {
bail!("unable to parse raw blob - wrong magic");
@@ -313,7 +273,7 @@ impl DataBlob {
/// To do that, we need to decompress data first. Please note that
/// this is not possible for encrypted chunks. This function simply returns Ok
/// for encrypted chunks.
/// Note: This does not call verify_crc
/// Note: This does not call verify_crc, because this is usually done in load
pub fn verify_unencrypted(
&self,
expected_chunk_size: usize,
@@ -326,12 +286,26 @@ impl DataBlob {
return Ok(());
}

let data = self.decode(None)?;
// verifies digest!
let data = self.decode(None, Some(expected_digest))?;

if expected_chunk_size != data.len() {
bail!("detected chunk with wrong length ({} != {})", expected_chunk_size, data.len());
}
let digest = openssl::sha::sha256(&data);

Ok(())
}

fn verify_digest(
data: &[u8],
config: Option<&CryptConfig>,
expected_digest: &[u8; 32],
) -> Result<(), Error> {

let digest = match config {
Some(config) => config.compute_digest(data),
None => openssl::sha::sha256(data),
};
if &digest != expected_digest {
bail!("detected chunk with wrong digest.");
}
@@ -376,7 +350,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {

/// Set encryption Configuration
///
/// If set, chunks are encrypted.
/// If set, chunks are encrypted
pub fn crypt_config(mut self, value: &'b CryptConfig) -> Self {
if self.digest_computed {
panic!("unable to set crypt_config after compute_digest().");
@@ -415,12 +389,7 @@ impl <'a, 'b> DataChunkBuilder<'a, 'b> {
self.compute_digest();
}

let chunk = DataBlob::encode(
self.orig_data,
self.config,
self.compress,
)?;

let chunk = DataBlob::encode(self.orig_data, self.config, self.compress)?;
Ok((chunk, self.digest))
}
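
// Editor's sketch (not part of the diff): typical builder flow for a chunk.
// Only crypt_config() and build() appear in the hunks above; `new()` and
// `compress()` are assumed from the builder's context and may differ.
fn build_chunk(data: &[u8], config: &CryptConfig) -> Result<(DataBlob, [u8; 32]), Error> {
    let (chunk, digest) = DataChunkBuilder::new(data)
        .compress(true)
        .crypt_config(config) // must be set before the digest is computed
        .build()?;
    Ok((chunk, digest))
}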

@@ -1,4 +1,4 @@
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use std::sync::Arc;
use std::io::{Read, BufReader};
use proxmox::tools::io::ReadExt;
@@ -8,8 +8,6 @@ use super::*;
enum BlobReaderState<R: Read> {
Uncompressed { expected_crc: u32, csum_reader: ChecksumReader<R> },
Compressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
Signed { expected_crc: u32, expected_hmac: [u8; 32], csum_reader: ChecksumReader<R> },
SignedCompressed { expected_crc: u32, expected_hmac: [u8; 32], decompr: zstd::stream::read::Decoder<BufReader<ChecksumReader<R>>> },
Encrypted { expected_crc: u32, decrypt_reader: CryptReader<BufReader<ChecksumReader<R>>> },
EncryptedCompressed { expected_crc: u32, decompr: zstd::stream::read::Decoder<BufReader<CryptReader<BufReader<ChecksumReader<R>>>>> },
}
@@ -41,40 +39,26 @@ impl <R: Read> DataBlobReader<R> {
let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
Ok(Self { state: BlobReaderState::Compressed { expected_crc, decompr }})
}
AUTHENTICATED_BLOB_MAGIC_1_0 => {
let expected_crc = u32::from_le_bytes(head.crc);
let mut expected_hmac = [0u8; 32];
reader.read_exact(&mut expected_hmac)?;
let csum_reader = ChecksumReader::new(reader, config);
Ok(Self { state: BlobReaderState::Signed { expected_crc, expected_hmac, csum_reader }})
}
AUTH_COMPR_BLOB_MAGIC_1_0 => {
let expected_crc = u32::from_le_bytes(head.crc);
let mut expected_hmac = [0u8; 32];
reader.read_exact(&mut expected_hmac)?;
let csum_reader = ChecksumReader::new(reader, config);

let decompr = zstd::stream::read::Decoder::new(csum_reader)?;
Ok(Self { state: BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr }})
}
ENCRYPTED_BLOB_MAGIC_1_0 => {
let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
let expected_crc = u32::from_le_bytes(head.crc);
let mut iv = [0u8; 16];
let mut expected_tag = [0u8; 16];
reader.read_exact(&mut iv)?;
reader.read_exact(&mut expected_tag)?;
let csum_reader = ChecksumReader::new(reader, None);
let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
Ok(Self { state: BlobReaderState::Encrypted { expected_crc, decrypt_reader }})
}
ENCR_COMPR_BLOB_MAGIC_1_0 => {
let config = config.ok_or_else(|| format_err!("unable to read encrypted blob without key"))?;
let expected_crc = u32::from_le_bytes(head.crc);
let mut iv = [0u8; 16];
let mut expected_tag = [0u8; 16];
reader.read_exact(&mut iv)?;
reader.read_exact(&mut expected_tag)?;
let csum_reader = ChecksumReader::new(reader, None);
let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config.unwrap())?;
let decrypt_reader = CryptReader::new(BufReader::with_capacity(64*1024, csum_reader), iv, expected_tag, config)?;
let decompr = zstd::stream::read::Decoder::new(decrypt_reader)?;
Ok(Self { state: BlobReaderState::EncryptedCompressed { expected_crc, decompr }})
}
@@ -99,31 +83,6 @@ impl <R: Read> DataBlobReader<R> {
}
Ok(reader)
}
BlobReaderState::Signed { csum_reader, expected_crc, expected_hmac } => {
let (reader, crc, hmac) = csum_reader.finish()?;
if crc != expected_crc {
bail!("blob crc check failed");
}
if let Some(hmac) = hmac {
if hmac != expected_hmac {
bail!("blob signature check failed");
}
}
Ok(reader)
}
BlobReaderState::SignedCompressed { expected_crc, expected_hmac, decompr } => {
let csum_reader = decompr.finish().into_inner();
let (reader, crc, hmac) = csum_reader.finish()?;
if crc != expected_crc {
bail!("blob crc check failed");
}
if let Some(hmac) = hmac {
if hmac != expected_hmac {
bail!("blob signature check failed");
}
}
Ok(reader)
}
BlobReaderState::Encrypted { expected_crc, decrypt_reader } => {
let csum_reader = decrypt_reader.finish()?.into_inner();
let (reader, crc, _) = csum_reader.finish()?;
@@ -155,12 +114,6 @@ impl <R: Read> Read for DataBlobReader<R> {
BlobReaderState::Compressed { decompr, .. } => {
decompr.read(buf)
}
BlobReaderState::Signed { csum_reader, .. } => {
csum_reader.read(buf)
}
BlobReaderState::SignedCompressed { decompr, .. } => {
decompr.read(buf)
}
BlobReaderState::Encrypted { decrypt_reader, .. } => {
decrypt_reader.read(buf)
}

@@ -8,8 +8,6 @@ use super::*;
enum BlobWriterState<W: Write> {
Uncompressed { csum_writer: ChecksumWriter<W> },
Compressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
Signed { csum_writer: ChecksumWriter<W> },
SignedCompressed { compr: zstd::stream::write::Encoder<ChecksumWriter<W>> },
Encrypted { crypt_writer: CryptWriter<ChecksumWriter<W>> },
EncryptedCompressed { compr: zstd::stream::write::Encoder<CryptWriter<ChecksumWriter<W>>> },
}
@@ -42,33 +40,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {
Ok(Self { state: BlobWriterState::Compressed { compr }})
}

pub fn new_signed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
writer.seek(SeekFrom::Start(0))?;
let head = AuthenticatedDataBlobHeader {
head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: [0; 4] },
tag: [0u8; 32],
};
unsafe {
writer.write_le_value(head)?;
}
let csum_writer = ChecksumWriter::new(writer, Some(config));
Ok(Self { state: BlobWriterState::Signed { csum_writer }})
}

pub fn new_signed_compressed(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
writer.seek(SeekFrom::Start(0))?;
let head = AuthenticatedDataBlobHeader {
head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: [0; 4] },
tag: [0u8; 32],
};
unsafe {
writer.write_le_value(head)?;
}
let csum_writer = ChecksumWriter::new(writer, Some(config));
let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?;
Ok(Self { state: BlobWriterState::SignedCompressed { compr }})
}
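
// Editor's sketch (not part of the diff): writing a signed, compressed blob
// with the writer API above. The name `finish()` is an assumption based on
// the later hunks that patch the header; it is not shown here directly.
fn write_signed_compressed(file: std::fs::File, config: Arc<CryptConfig>) -> Result<(), Error> {
    use std::io::Write;
    let mut blob_writer = DataBlobWriter::new_signed_compressed(file, config)?;
    blob_writer.write_all(b"payload")?;
    let _file = blob_writer.finish()?; // seeks back and writes CRC + HMAC tag
    Ok(())
}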

pub fn new_encrypted(mut writer: W, config: Arc<CryptConfig>) -> Result<Self, Error> {
writer.seek(SeekFrom::Start(0))?;
let head = EncryptedDataBlobHeader {
@@ -129,37 +100,6 @@ impl <W: Write + Seek> DataBlobWriter<W> {

Ok(writer)
}
BlobWriterState::Signed { csum_writer } => {
let (mut writer, crc, tag) = csum_writer.finish()?;

let head = AuthenticatedDataBlobHeader {
head: DataBlobHeader { magic: AUTHENTICATED_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
tag: tag.unwrap(),
};

writer.seek(SeekFrom::Start(0))?;
unsafe {
writer.write_le_value(head)?;
}

Ok(writer)
}
BlobWriterState::SignedCompressed { compr } => {
let csum_writer = compr.finish()?;
let (mut writer, crc, tag) = csum_writer.finish()?;

let head = AuthenticatedDataBlobHeader {
head: DataBlobHeader { magic: AUTH_COMPR_BLOB_MAGIC_1_0, crc: crc.to_le_bytes() },
tag: tag.unwrap(),
};

writer.seek(SeekFrom::Start(0))?;
unsafe {
writer.write_le_value(head)?;
}

Ok(writer)
}
BlobWriterState::Encrypted { crypt_writer } => {
let (csum_writer, iv, tag) = crypt_writer.finish()?;
let (mut writer, crc, _) = csum_writer.finish()?;
@@ -203,12 +143,6 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
BlobWriterState::Compressed { ref mut compr } => {
compr.write(buf)
}
BlobWriterState::Signed { ref mut csum_writer } => {
csum_writer.write(buf)
}
BlobWriterState::SignedCompressed { ref mut compr } => {
compr.write(buf)
}
BlobWriterState::Encrypted { ref mut crypt_writer } => {
crypt_writer.write(buf)
}
@@ -226,13 +160,7 @@ impl <W: Write + Seek> Write for DataBlobWriter<W> {
BlobWriterState::Compressed { ref mut compr } => {
compr.flush()
}
BlobWriterState::Signed { ref mut csum_writer } => {
csum_writer.flush()
}
BlobWriterState::SignedCompressed { ref mut compr } => {
compr.flush()
}
BlobWriterState::Encrypted { ref mut crypt_writer } => {
crypt_writer.flush()
}
BlobWriterState::EncryptedCompressed { ref mut compr } => {

@@ -7,6 +7,9 @@ use std::convert::TryFrom;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use chrono::{DateTime, Utc};
use serde_json::Value;

use proxmox::tools::fs::{replace_file, CreateOptions};

use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
@@ -18,7 +21,9 @@ use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;
use crate::api2::types::GarbageCollectionStatus;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{GarbageCollectionStatus, Userid};

lazy_static! {
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
@@ -80,7 +85,7 @@ impl DataStore {
pub fn get_chunk_iterator(
&self,
) -> Result<
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>,
impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
Error
> {
self.chunk_store.get_chunk_iterator()
@@ -143,7 +148,7 @@ impl DataStore {
self.chunk_store.base_path()
}

/// Clenaup a backup directory
/// Cleanup a backup directory
///
/// Removes all files not mentioned in the manifest.
pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
@@ -196,6 +201,8 @@ impl DataStore {

let full_path = self.group_path(backup_group);

let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;

log::info!("removing backup group {:?}", full_path);
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
@@ -210,10 +217,15 @@ impl DataStore {
}

/// Remove a backup directory including all content
pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> {
pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {

let full_path = self.snapshot_path(backup_dir);

let _guard;
if !force {
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
}

log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
@@ -245,16 +257,21 @@ impl DataStore {
/// Returns the backup owner.
///
/// The backup owner is the user who first created the backup group.
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
Ok(owner.trim_end().to_string()) // remove trailing newline
Ok(owner.trim_end().parse()?) // remove trailing newline
}
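
// Editor's sketch (not part of the diff): the owner file now parses into the
// typed Userid instead of a plain String, so malformed content fails early.
// Assumes Userid implements Display, since it is read from / written as text.
fn print_owner(store: &DataStore, group: &BackupGroup) -> Result<(), Error> {
    let owner: Userid = store.get_owner(group)?; // read + trim + parse
    println!("group owned by {}", owner);
    Ok(())
}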

/// Set the backup owner.
pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
pub fn set_owner(
&self,
backup_group: &BackupGroup,
userid: &Userid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path();
path.push(backup_group.group_path());
path.push("owner");
@@ -278,12 +295,17 @@ impl DataStore {
Ok(())
}

/// Create a backup group if it does not already exist.
/// Create (if it does not already exist) and lock a backup group
///
/// It also sets the owner to 'userid'. If the group already exists, it returns the
/// current owner (instead of setting the owner).
pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {

///
/// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
userid: &Userid,
) -> Result<(Userid, DirLockGuard), Error> {
// create intermediate path first:
let base_path = self.base_path();

@@ -296,13 +318,15 @@ impl DataStore {
// create the last component now
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
self.set_owner(backup_group, userid, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner)
Ok((owner, guard))
}
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok(owner)
Ok((owner, guard))
}
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
}
@@ -311,15 +335,20 @@ impl DataStore {
/// Creates a new backup snapshot inside a BackupGroup
///
/// The BackupGroup directory needs to exist.
pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
-> Result<(PathBuf, bool, DirLockGuard), Error>
{
let relative_path = backup_dir.relative_path();
let mut full_path = self.base_path();
full_path.push(&relative_path);

let lock = ||
lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");

match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path, true)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
Err(e) => Err(e)
Ok(_) => Ok((relative_path, true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
Err(e) => Err(e.into())
}
}
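
// Editor's sketch (not part of the diff): the returned DirLockGuard ties the
// snapshot directory to the running task, so a concurrent open fails fast
// instead of racing.
fn start_snapshot(store: &DataStore, dir: &BackupDir) -> Result<(), Error> {
    let (_rel_path, is_new, _guard) = store.create_locked_backup_dir(dir)?;
    // the lock is held until `_guard` is dropped at the end of the task
    if !is_new {
        log::info!("reusing existing snapshot directory");
    }
    Ok(())
}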

@@ -339,9 +368,30 @@ impl DataStore {
.map(|s| s.starts_with("."))
.unwrap_or(false)
}

let handle_entry_err = |err: walkdir::Error| {
if let Some(inner) = err.io_error() {
let path = err.path().unwrap_or(Path::new(""));
match inner.kind() {
io::ErrorKind::PermissionDenied => {
// only allow skipping the ext4 fsck directory; avoid GC if, for example,
// a user got file permissions wrong on datastore rsync to a new server
if err.depth() > 1 || !path.ends_with("lost+found") {
bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
}
},
_ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
}
}
Ok(())
};
for entry in walker.filter_entry(|e| !is_hidden(e)) {
let path = entry?.into_path();
let path = match entry {
Ok(entry) => entry.into_path(),
Err(err) => {
handle_entry_err(err)?;
continue
},
};
if let Ok(archive_type) = archive_type(&path) {
if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
list.push(path);
@@ -369,8 +419,8 @@ impl DataStore {
tools::fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap();
if let Err(err) = self.chunk_store.touch_chunk(digest) {
bail!("unable to access chunk {}, required by {:?} - {}",
proxmox::tools::digest_to_hex(digest), file_name, err);
worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
proxmox::tools::digest_to_hex(digest), file_name, err));
}
}
Ok(())
@@ -380,6 +430,12 @@ impl DataStore {

let image_list = self.list_images()?;

let image_count = image_list.len();

let mut done = 0;

let mut last_percentage: usize = 0;

for path in image_list {

worker.fail_on_abort()?;
@@ -394,6 +450,14 @@ impl DataStore {
self.index_mark_used_chunks(index, &path, status, worker)?;
}
}
done += 1;

let percentage = done*100/image_count;
if percentage > last_percentage {
worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
percentage, done, image_count));
last_percentage = percentage;
}
}

Ok(())
@@ -413,9 +477,8 @@ impl DataStore {

let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

let now = unsafe { libc::time(std::ptr::null_mut()) };

let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

let mut gc_status = GarbageCollectionStatus::default();
gc_status.upid = Some(worker.to_string());
@@ -425,26 +488,29 @@ impl DataStore {
self.mark_used_chunks(&mut gc_status, &worker)?;

worker.log("Start GC phase2 (sweep unused chunks)");
self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;

worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
if gc_status.pending_bytes > 0 {
worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
}
if gc_status.removed_bad > 0 {
worker.log(&format!("Removed bad files: {}", gc_status.removed_bad));
}

worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));

if gc_status.index_data_bytes > 0 {
let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
}

worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));

if gc_status.disk_chunks > 0 {
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
worker.log(&format!("Average chunk size: {}", avg_chunk));
worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
}

*self.last_gc_status.lock().unwrap() = gc_status;
@@ -476,27 +542,69 @@ impl DataStore {
self.chunk_store.insert_chunk(chunk, digest)
}

pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
let blob = self.chunk_store.read_chunk(digest)?;
blob.verify_crc()?;
blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
Ok(())
}

pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<(DataBlob, u64), Error> {
pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
let mut path = self.base_path();
path.push(backup_dir.relative_path());
path.push(filename);

let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
let raw_size = raw_data.len() as u64;
let blob = DataBlob::from_raw(raw_data)?;
Ok((blob, raw_size))
proxmox::try_block!({
let mut file = std::fs::File::open(&path)?;
DataBlob::load_from_reader(&mut file)
}).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
}

pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
let (blob, raw_size) = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;

pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

proxmox::try_block!({
let mut file = std::fs::File::open(&chunk_path)?;
DataBlob::load_from_reader(&mut file)
}).map_err(|err| format_err!(
"store '{}', unable to load chunk '{}' - {}",
self.name(),
digest_str,
err,
))
}

pub fn load_manifest(
&self,
backup_dir: &BackupDir,
) -> Result<(BackupManifest, u64), Error> {
let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
let raw_size = blob.raw_size();
let manifest = BackupManifest::try_from(blob)?;
Ok((manifest, raw_size))
}

pub fn load_manifest_json(
&self,
backup_dir: &BackupDir,
) -> Result<Value, Error> {
let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
// no expected digest available
let manifest_data = blob.decode(None, None)?;
let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
Ok(manifest)
}

pub fn store_manifest(
&self,
backup_dir: &BackupDir,
manifest: Value,
) -> Result<(), Error> {
let manifest = serde_json::to_string_pretty(&manifest)?;
let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
let raw_data = blob.raw_data();

let mut path = self.base_path();
path.push(backup_dir.relative_path());
path.push(MANIFEST_BLOB_NAME);

replace_file(&path, raw_data, CreateOptions::new())?;

Ok(())
}
}
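
// Editor's sketch (not part of the diff): round-tripping the manifest through
// the new CRC-checked loader and store_manifest(). The "note" key is a
// hypothetical example entry in the unprotected section.
fn bump_note(store: &DataStore, dir: &BackupDir) -> Result<(), Error> {
    let mut json = store.load_manifest_json(dir)?; // CRC verified via load_blob
    json["unprotected"]["note"] = "checked".into(); // not covered by the signature
    store.store_manifest(dir, json)?;
    Ok(())
}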

@@ -11,7 +11,6 @@ use anyhow::{bail, format_err, Error};

use proxmox::tools::io::ReadExt;
use proxmox::tools::uuid::Uuid;
use proxmox::tools::vec;
use proxmox::tools::mmap::Mmap;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

@@ -41,6 +40,24 @@ proxmox::static_assert_size!(DynamicIndexHeader, 4096);
// pub data: DynamicIndexHeaderData,
// }

impl DynamicIndexHeader {
/// Convenience method to allocate a zero-initialized header struct.
pub fn zeroed() -> Box<Self> {
unsafe {
Box::from_raw(std::alloc::alloc_zeroed(std::alloc::Layout::new::<Self>()) as *mut Self)
}
}

pub fn as_bytes(&self) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
self as *const Self as *const u8,
std::mem::size_of::<Self>(),
)
}
}
}
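
// Editor's note (not part of the diff): zeroed() allocates the 4 KiB header
// directly on the heap, avoiding a large stack copy; as_bytes() is sound
// because the struct is fully initialized plain data. A minimal sketch,
// assuming it runs in the same module as the constants above:
fn write_empty_header(writer: &mut impl std::io::Write) -> Result<(), Error> {
    let mut header = DynamicIndexHeader::zeroed();
    header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
    writer.write_all(header.as_bytes())?;
    Ok(())
}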

#[derive(Clone, Debug)]
#[repr(C)]
pub struct DynamicEntry {
@@ -216,6 +233,24 @@ impl IndexFile for DynamicIndexReader {
digest: self.index[pos].digest.clone(),
})
}

fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
let end_idx = self.index.len() - 1;
let end = self.chunk_end(end_idx);
let found_idx = self.binary_search(0, 0, end_idx, end, offset);
let found_idx = match found_idx {
Ok(i) => i,
Err(_) => return None
};

let found_start = if found_idx == 0 {
0
} else {
self.chunk_end(found_idx - 1)
};

Some((found_idx, offset - found_start))
}
}
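
// Editor's sketch (not part of the diff): chunk_from_offset maps a byte
// offset in the logical archive to (chunk index, offset inside that chunk),
// which is the primitive random-access reads build on. The invariant below
// should hold for both the dynamic and the fixed implementation:
fn locate(index: &dyn IndexFile, offset: u64) -> Option<()> {
    let (chunk_idx, inner_offset) = index.chunk_from_offset(offset)?;
    let info = index.chunk_info(chunk_idx)?;
    assert_eq!(info.range.start + inner_offset, offset);
    Some(())
}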

struct CachedChunk {
@@ -471,27 +506,16 @@ impl DynamicIndexWriter {

let mut writer = BufWriter::with_capacity(1024 * 1024, file);

let header_size = std::mem::size_of::<DynamicIndexHeader>();

// todo: use static assertion when available in rust
if header_size != 4096 {
panic!("got unexpected header size");
}

let ctime = epoch_now_u64()?;

let uuid = Uuid::generate();

let mut buffer = vec::zeroed(header_size);
let header = crate::tools::map_struct_mut::<DynamicIndexHeader>(&mut buffer)?;

let mut header = DynamicIndexHeader::zeroed();
header.magic = super::DYNAMIC_SIZED_CHUNK_INDEX_1_0;
header.ctime = u64::to_le(ctime);
header.uuid = *uuid.as_bytes();

header.index_csum = [0u8; 32];

writer.write_all(&buffer)?;
// header.index_csum = [0u8; 32];
writer.write_all(header.as_bytes())?;

let csum = Some(openssl::sha::Sha256::new());

@@ -17,12 +17,6 @@ pub const ENCRYPTED_BLOB_MAGIC_1_0: [u8; 8] = [123, 103, 133, 190, 34, 45, 76, 2
// openssl::sha::sha256(b"Proxmox Backup zstd compressed encrypted blob v1.0")[0..8]
pub const ENCR_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [230, 89, 27, 191, 11, 191, 216, 11];

//openssl::sha::sha256(b"Proxmox Backup authenticated blob v1.0")[0..8]
pub const AUTHENTICATED_BLOB_MAGIC_1_0: [u8; 8] = [31, 135, 238, 226, 145, 206, 5, 2];

//openssl::sha::sha256(b"Proxmox Backup zstd compressed authenticated blob v1.0")[0..8]
pub const AUTH_COMPR_BLOB_MAGIC_1_0: [u8; 8] = [126, 166, 15, 190, 145, 31, 169, 96];

// openssl::sha::sha256(b"Proxmox Backup fixed sized chunk index v1.0")[0..8]
pub const FIXED_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [47, 127, 65, 237, 145, 253, 15, 205];

@@ -50,19 +44,6 @@ pub struct DataBlobHeader {
pub crc: [u8; 4],
}

/// Authenticated data blob binary storage format
///
/// The ``DataBlobHeader`` for authenticated blobs additionally contains
/// a 32 byte HMAC tag, followed by the data:
///
/// (MAGIC || CRC32 || TAG || Data).
#[derive(Endian)]
#[repr(C,packed)]
pub struct AuthenticatedDataBlobHeader {
pub head: DataBlobHeader,
pub tag: [u8; 32],
}

/// Encrypted data blob binary storage format
///
/// The ``DataBlobHeader`` for encrypted blobs additionally contains
@@ -87,8 +68,6 @@ pub fn header_size(magic: &[u8; 8]) -> usize {
&COMPRESSED_BLOB_MAGIC_1_0 => std::mem::size_of::<DataBlobHeader>(),
&ENCRYPTED_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
&ENCR_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<EncryptedDataBlobHeader>(),
&AUTHENTICATED_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
&AUTH_COMPR_BLOB_MAGIC_1_0 => std::mem::size_of::<AuthenticatedDataBlobHeader>(),
_ => panic!("unknown blob magic"),
}
}

@@ -6,14 +6,13 @@ use super::chunk_store::*;
use super::{IndexFile, ChunkReadInfo};
use crate::tools::{self, epoch_now_u64};

use chrono::{Local, TimeZone};
use chrono::{Local, LocalResult, TimeZone};
use std::fs::File;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use super::read_chunk::*;
use super::ChunkInfo;

use proxmox::tools::io::ReadExt;
@@ -146,26 +145,15 @@ impl FixedIndexReader {
Ok(())
}

#[inline]
fn chunk_end(&self, pos: usize) -> u64 {
if pos >= self.index_length {
panic!("chunk index out of range");
}

let end = ((pos + 1) * self.chunk_size) as u64;
if end > self.size {
self.size
} else {
end
}
}

pub fn print_info(&self) {
println!("Size: {}", self.size);
println!("ChunkSize: {}", self.chunk_size);
println!(
"CTime: {}",
Local.timestamp(self.ctime as i64, 0).format("%c")
match Local.timestamp_opt(self.ctime as i64, 0) {
LocalResult::Single(ctime) => ctime.format("%c").to_string(),
_ => (self.ctime as i64).to_string(),
}
);
println!("UUID: {:?}", self.uuid);
}
@@ -219,6 +207,17 @@ impl IndexFile for FixedIndexReader {

(csum, chunk_end)
}

fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
if offset >= self.size {
return None;
}

Some((
(offset / self.chunk_size as u64) as usize,
offset & (self.chunk_size - 1) as u64 // fast modulo, valid for 2^x chunk_size
))
}
}
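
// Editor's example (not part of the diff): `offset & (chunk_size - 1)` equals
// `offset % chunk_size` only when chunk_size is a power of two, which fixed
// indexes guarantee. A standalone check:
#[test]
fn fast_modulo_matches_remainder() {
    let chunk_size: u64 = 4 * 1024 * 1024; // power of two
    for offset in [0u64, 1, chunk_size - 1, chunk_size, 7 * chunk_size + 123] {
        assert_eq!(offset & (chunk_size - 1), offset % chunk_size);
    }
}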

pub struct FixedIndexWriter {
@@ -465,142 +464,3 @@ impl FixedIndexWriter {
Ok(())
}
}

pub struct BufferedFixedReader<S> {
store: S,
index: FixedIndexReader,
archive_size: u64,
read_buffer: Vec<u8>,
buffered_chunk_idx: usize,
buffered_chunk_start: u64,
read_offset: u64,
}

impl<S: ReadChunk> BufferedFixedReader<S> {
pub fn new(index: FixedIndexReader, store: S) -> Self {
let archive_size = index.size;
Self {
store,
index,
archive_size,
read_buffer: Vec::with_capacity(1024 * 1024),
buffered_chunk_idx: 0,
buffered_chunk_start: 0,
read_offset: 0,
}
}

pub fn archive_size(&self) -> u64 {
self.archive_size
}

fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> {
let index = &self.index;
let info = match index.chunk_info(idx) {
Some(info) => info,
None => bail!("chunk index out of range"),
};

// fixme: avoid copy

let data = self.store.read_chunk(&info.digest)?;
let size = info.range.end - info.range.start;
if size != data.len() as u64 {
bail!("read chunk with wrong size ({} != {})", size, data.len());
}

self.read_buffer.clear();
self.read_buffer.extend_from_slice(&data);

self.buffered_chunk_idx = idx;

self.buffered_chunk_start = info.range.start as u64;
Ok(())
}
}

impl<S: ReadChunk> crate::tools::BufferedRead for BufferedFixedReader<S> {
fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> {
if offset == self.archive_size {
return Ok(&self.read_buffer[0..0]);
}

let buffer_len = self.read_buffer.len();
let index = &self.index;

// optimization for sequential read
if buffer_len > 0
&& ((self.buffered_chunk_idx + 1) < index.index_length)
&& (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
{
let next_idx = self.buffered_chunk_idx + 1;
let next_end = index.chunk_end(next_idx);
if offset < next_end {
self.buffer_chunk(next_idx)?;
let buffer_offset = (offset - self.buffered_chunk_start) as usize;
return Ok(&self.read_buffer[buffer_offset..]);
}
}

if (buffer_len == 0)
|| (offset < self.buffered_chunk_start)
|| (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64)))
{
let idx = (offset / index.chunk_size as u64) as usize;
self.buffer_chunk(idx)?;
}

let buffer_offset = (offset - self.buffered_chunk_start) as usize;
Ok(&self.read_buffer[buffer_offset..])
}
}

impl<S: ReadChunk> std::io::Read for BufferedFixedReader<S> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
use crate::tools::BufferedRead;
use std::io::{Error, ErrorKind};

let data = match self.buffered_read(self.read_offset) {
Ok(v) => v,
Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())),
};

let n = if data.len() > buf.len() {
buf.len()
} else {
data.len()
};

unsafe {
std::ptr::copy_nonoverlapping(data.as_ptr(), buf.as_mut_ptr(), n);
}

self.read_offset += n as u64;

Ok(n)
}
}

impl<S: ReadChunk> Seek for BufferedFixedReader<S> {
fn seek(&mut self, pos: SeekFrom) -> Result<u64, std::io::Error> {
let new_offset = match pos {
SeekFrom::Start(start_offset) => start_offset as i64,
SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset,
SeekFrom::Current(offset) => (self.read_offset as i64) + offset,
};

use std::io::{Error, ErrorKind};
if (new_offset < 0) || (new_offset > (self.archive_size as i64)) {
return Err(Error::new(
ErrorKind::Other,
format!(
"seek is out of range {} ([0..{}])",
new_offset, self.archive_size
),
));
}
self.read_offset = new_offset as u64;

Ok(self.read_offset)
}
}
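
// Editor's sketch (not part of the diff): BufferedFixedReader implements
// Read + Seek, so a fixed-index archive can be consumed like a regular file.
fn read_middle<S: ReadChunk>(mut reader: BufferedFixedReader<S>) -> Result<Vec<u8>, Error> {
    use std::io::{Read, Seek, SeekFrom};
    let mid = reader.archive_size() / 2;
    reader.seek(SeekFrom::Start(mid))?;
    let mut buf = vec![0u8; 4096];
    let n = reader.read(&mut buf)?; // at most one buffered chunk per call
    buf.truncate(n);
    Ok(buf)
}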

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::ops::Range;

#[derive(Clone)]
pub struct ChunkReadInfo {
pub range: Range<u64>,
pub digest: [u8; 32],
@@ -22,6 +23,9 @@ pub trait IndexFile {
fn index_bytes(&self) -> u64;
fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;

/// Get the chunk index and the relative offset within it for a byte offset
fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;

/// Compute index checksum and size
fn compute_csum(&self) -> ([u8; 32], u64);

@@ -1,7 +1,7 @@
use anyhow::{bail, format_err, Error};
use anyhow::{bail, format_err, Context, Error};

use serde::{Deserialize, Serialize};
use chrono::{Local, TimeZone, DateTime};
use chrono::{Local, DateTime};

use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox::try_block;
@@ -136,7 +136,7 @@ pub fn encrypt_key_with_passphrase(
enc_data.extend_from_slice(&tag);
enc_data.extend_from_slice(&encrypted_key);

let created = Local.timestamp(Local::now().timestamp(), 0);
let created = Local::now();

Ok(KeyConfig {
kdf: Some(kdf),
@@ -146,12 +146,26 @@ pub fn encrypt_key_with_passphrase(
})
}

pub fn load_and_decrypt_key(path: &std::path::Path, passphrase: &dyn Fn() -> Result<Vec<u8>, Error>) -> Result<([u8;32], DateTime<Local>), Error> {
pub fn load_and_decrypt_key(
path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
do_load_and_decrypt_key(path, passphrase)
.with_context(|| format!("failed to load decryption key from {:?}", path))
}

let raw = file_get_contents(&path)?;
let data = String::from_utf8(raw)?;
fn do_load_and_decrypt_key(
path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
decrypt_key(&file_get_contents(&path)?, passphrase)
}

let key_config: KeyConfig = serde_json::from_str(&data)?;
pub fn decrypt_key(
mut keydata: &[u8],
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8;32], DateTime<Local>), Error> {
let key_config: KeyConfig = serde_json::from_reader(&mut keydata)?;

let raw_data = key_config.data;
let created = key_config.created;

@@ -3,22 +3,76 @@ use std::convert::TryFrom;
use std::path::Path;

use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use crate::backup::BackupDir;
use crate::backup::{BackupDir, CryptMode, CryptConfig};

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";

mod hex_csum {
use serde::{self, Deserialize, Serializer, Deserializer};

pub fn serialize<S>(
csum: &[u8; 32],
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = proxmox::tools::digest_to_hex(csum);
serializer.serialize_str(&s)
}

pub fn deserialize<'de, D>(
deserializer: D,
) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
}
}
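
// Editor's example (not part of the diff): the hex_csum module plugs into
// serde's `with` attribute, so a digest round-trips as a hex string - the
// same pattern FileInfo uses below. CsumDemo is a hypothetical type.
#[derive(Serialize, Deserialize)]
struct CsumDemo {
    #[serde(with = "hex_csum")]
    csum: [u8; 32],
}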

fn crypt_mode_none() -> CryptMode { CryptMode::None }
fn empty_value() -> Value { json!({}) }

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct FileInfo {
pub filename: String,
pub encrypted: Option<bool>,
#[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
pub crypt_mode: CryptMode,
pub size: u64,
#[serde(with = "hex_csum")]
pub csum: [u8; 32],
}

impl FileInfo {

/// Return expected CryptMode of referenced chunks
///
/// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
/// should only reference plain chunks.
pub fn chunk_crypt_mode(&self) -> CryptMode {
match self.crypt_mode {
CryptMode::Encrypt => CryptMode::Encrypt,
CryptMode::SignOnly | CryptMode::None => CryptMode::None,
}
}
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct BackupManifest {
snapshot: BackupDir,
backup_type: String,
backup_id: String,
backup_time: i64,
files: Vec<FileInfo>,
#[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
pub unprotected: Value,
pub signature: Option<String>,
}

#[derive(PartialEq)]
@@ -46,12 +100,19 @@ pub fn archive_type<P: AsRef<Path>>(
impl BackupManifest {

pub fn new(snapshot: BackupDir) -> Self {
Self { files: Vec::new(), snapshot }
Self {
backup_type: snapshot.group().backup_type().into(),
backup_id: snapshot.group().backup_id().into(),
backup_time: snapshot.backup_time().timestamp(),
files: Vec::new(),
unprotected: json!({}),
signature: None,
}
}

pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], encrypted: Option<bool>) -> Result<(), Error> {
pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
let _archive_type = archive_type(&filename)?; // check type
self.files.push(FileInfo { filename, size, csum, encrypted });
self.files.push(FileInfo { filename, size, csum, crypt_mode });
Ok(())
}

@@ -59,7 +120,7 @@ impl BackupManifest {
&self.files[..]
}

fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {

let info = self.files.iter().find(|item| item.filename == name);

@@ -84,74 +145,164 @@ impl BackupManifest {
Ok(())
}

pub fn into_json(self) -> Value {
json!({
"backup-type": self.snapshot.group().backup_type(),
"backup-id": self.snapshot.group().backup_id(),
"backup-time": self.snapshot.backup_time().timestamp(),
"files": self.files.iter()
.fold(Vec::new(), |mut acc, info| {
let mut value = json!({
"filename": info.filename,
"encrypted": info.encrypted,
"size": info.size,
"csum": proxmox::tools::digest_to_hex(&info.csum),
});

if let Some(encrypted) = info.encrypted {
value["encrypted"] = encrypted.into();
}

acc.push(value);
acc
})
})
// Generate canonical json
fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
let mut data = Vec::new();
Self::write_canonical_json(value, &mut data)?;
Ok(data)
}

fn write_canonical_json(value: &Value, output: &mut Vec<u8>) -> Result<(), Error> {
match value {
Value::Null => bail!("got unexpected null value"),
Value::String(_) | Value::Number(_) | Value::Bool(_) => {
serde_json::to_writer(output, &value)?;
}
Value::Array(list) => {
output.push(b'[');
let mut iter = list.iter();
if let Some(item) = iter.next() {
Self::write_canonical_json(item, output)?;
for item in iter {
output.push(b',');
Self::write_canonical_json(item, output)?;
}
}
output.push(b']');
}
Value::Object(map) => {
output.push(b'{');
let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
keys.sort();
let mut iter = keys.into_iter();
if let Some(key) = iter.next() {
serde_json::to_writer(&mut *output, &key)?;
output.push(b':');
Self::write_canonical_json(&map[key], output)?;
for key in iter {
output.push(b',');
serde_json::to_writer(&mut *output, &key)?;
output.push(b':');
Self::write_canonical_json(&map[key], output)?;
}
}
output.push(b'}');
}
}
Ok(())
}
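
// Editor's example (not part of the diff): the canonical form sorts object
// keys, so differently-ordered inputs produce identical bytes - which is what
// makes the signature stable. Assumes a test in the same module, since
// to_canonical_json() is private.
#[test]
fn canonical_json_sorts_keys() -> Result<(), Error> {
    let a: Value = serde_json::from_str(r#"{"b":1,"a":[true,2]}"#)?;
    let b: Value = serde_json::from_str(r#"{"a":[true,2],"b":1}"#)?;
    assert_eq!(
        BackupManifest::to_canonical_json(&a)?,
        BackupManifest::to_canonical_json(&b)?,
    );
    Ok(())
}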

/// Compute manifest signature
///
/// By generating an HMAC SHA256 over the canonical json
/// representation. The 'unprotected' property is excluded.
pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
}

fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {

let mut signed_data = data.clone();

signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
signed_data.as_object_mut().unwrap().remove("signature"); // exclude

let canonical = Self::to_canonical_json(&signed_data)?;

let sig = crypt_config.compute_auth_tag(&canonical);

Ok(sig)
}

/// Converts the manifest into a json string, and adds a signature if there is a crypt_config.
pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {

let mut manifest = serde_json::to_value(&self)?;

if let Some(crypt_config) = crypt_config {
let sig = self.signature(crypt_config)?;
manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
}

let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
Ok(manifest)
}

/// Try to read the manifest. This verifies the signature if there is a crypt_config.
pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
let json: Value = serde_json::from_slice(data)?;
let signature = json["signature"].as_str().map(String::from);

if let Some(ref crypt_config) = crypt_config {
if let Some(signature) = signature {
let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);
if signature != expected_signature {
bail!("wrong signature in manifest");
}
} else {
// not signed: warn/fail?
}
}

let manifest: BackupManifest = serde_json::from_value(json)?;
Ok(manifest)
}
}

impl TryFrom<super::DataBlob> for BackupManifest {
type Error = Error;

fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
let data = blob.decode(None)
// no expected digest available
let data = blob.decode(None, None)
.map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
let json: Value = serde_json::from_slice(&data[..])
.map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
BackupManifest::try_from(json)
let manifest: BackupManifest = serde_json::from_value(json)?;
Ok(manifest)
}
}

impl TryFrom<Value> for BackupManifest {
type Error = Error;

fn try_from(data: Value) -> Result<Self, Error> {
#[test]
fn test_manifest_signature() -> Result<(), Error> {

use crate::tools::{required_string_property, required_integer_property, required_array_property};
use crate::backup::{KeyDerivationConfig};

proxmox::try_block!({
let backup_type = required_string_property(&data, "backup-type")?;
let backup_id = required_string_property(&data, "backup-id")?;
let backup_time = required_integer_property(&data, "backup-time")?;
let pw = b"test";

let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let kdf = KeyDerivationConfig::Scrypt {
n: 65536,
r: 8,
p: 1,
salt: Vec::new(),
};

let mut manifest = BackupManifest::new(snapshot);
let testkey = kdf.derive_key(pw)?;

for item in required_array_property(&data, "files")?.iter() {
let filename = required_string_property(item, "filename")?.to_owned();
let csum = required_string_property(item, "csum")?;
let csum = proxmox::tools::hex_to_digest(csum)?;
let size = required_integer_property(item, "size")? as u64;
let encrypted = item["encrypted"].as_bool();
manifest.add_file(filename, size, csum, encrypted)?;
}
let crypt_config = CryptConfig::new(testkey)?;

if manifest.files().is_empty() {
bail!("manifest does not list any files.");
}
let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;

Ok(manifest)
}).map_err(|err: Error| format_err!("unable to parse backup manifest - {}", err))
let mut manifest = BackupManifest::new(snapshot);

}
manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;

manifest.unprotected["note"] = "This is not protected by the signature.".into();

let text = manifest.to_string(Some(&crypt_config))?;

let manifest: Value = serde_json::from_str(&text)?;
let signature = manifest["signature"].as_str().unwrap().to_string();

assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");

let manifest: BackupManifest = serde_json::from_value(manifest)?;
let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);

assert_eq!(signature, expected_signature);

Ok(())
}

@@ -53,7 +53,7 @@ fn remove_incomplete_snapshots(
let mut keep_unfinished = true;
for info in list.iter() {
// backup is considered unfinished if there is no manifest
if info.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME) {
if info.is_finished() {
// There is a new finished backup, so there is no need
// to keep older unfinished backups.
keep_unfinished = false;

@@ -2,9 +2,9 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

use anyhow::Error;
use anyhow::{bail, Error};

use super::crypt_config::CryptConfig;
use super::crypt_config::{CryptConfig, CryptMode};
use super::data_blob::DataBlob;
use super::datastore::DataStore;

@@ -21,33 +21,47 @@ pub trait ReadChunk {
pub struct LocalChunkReader {
store: Arc<DataStore>,
crypt_config: Option<Arc<CryptConfig>>,
crypt_mode: CryptMode,
}

impl LocalChunkReader {
pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>) -> Self {
pub fn new(store: Arc<DataStore>, crypt_config: Option<Arc<CryptConfig>>, crypt_mode: CryptMode) -> Self {
Self {
store,
crypt_config,
crypt_mode,
}
}

fn ensure_crypt_mode(&self, chunk_mode: CryptMode) -> Result<(), Error> {
match self.crypt_mode {
CryptMode::Encrypt => {
match chunk_mode {
CryptMode::Encrypt => Ok(()),
CryptMode::SignOnly | CryptMode::None => bail!("Index and chunk CryptMode don't match."),
}
},
CryptMode::SignOnly | CryptMode::None => {
match chunk_mode {
CryptMode::Encrypt => bail!("Index and chunk CryptMode don't match."),
CryptMode::SignOnly | CryptMode::None => Ok(()),
}
},
}
}
}
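
// Editor's note (not part of the diff): the check above collapses to
// "encrypted indexes may only reference encrypted chunks, everything else
// only plain ones". The same rule as a compact predicate:
fn modes_match(index_mode: CryptMode, chunk_mode: CryptMode) -> bool {
    matches!(index_mode, CryptMode::Encrypt) == matches!(chunk_mode, CryptMode::Encrypt)
}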
|
||||
|
||||
impl ReadChunk for LocalChunkReader {
|
||||
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
let raw_data = proxmox::tools::fs::file_get_contents(&path)?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
let chunk = self.store.load_chunk(digest)?;
|
||||
self.ensure_crypt_mode(chunk.crypt_mode()?)?;
|
||||
Ok(chunk)
|
||||
}
|
||||
|
||||
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error> {
|
||||
let chunk = ReadChunk::read_raw_chunk(self, digest)?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
|
||||
|
||||
Ok(raw_data)
|
||||
}
|
||||
@ -76,8 +90,9 @@ impl AsyncReadChunk for LocalChunkReader {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
|
||||
let raw_data = tokio::fs::read(&path).await?;
|
||||
let chunk = DataBlob::from_raw(raw_data)?;
|
||||
chunk.verify_crc()?;
|
||||
|
||||
let chunk = DataBlob::load_from_reader(&mut &raw_data[..])?;
|
||||
self.ensure_crypt_mode(chunk.crypt_mode()?)?;
|
||||
|
||||
Ok(chunk)
|
||||
})
|
||||
@ -90,7 +105,7 @@ impl AsyncReadChunk for LocalChunkReader {
|
||||
Box::pin(async move {
|
||||
let chunk = AsyncReadChunk::read_raw_chunk(self, digest).await?;
|
||||
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||
let raw_data = chunk.decode(self.crypt_config.as_ref().map(Arc::as_ref), Some(digest))?;
|
||||
|
||||
// fixme: verify digest?
|
||||
|
||||
|
@@ -1,58 +1,215 @@
-use anyhow::{bail, Error};
+use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{Ordering, AtomicUsize};
+use std::time::Instant;
+
+use anyhow::{bail, format_err, Error};

 use crate::server::WorkerTask;
+use crate::api2::types::*;

 use super::{
-    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
-    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
+    DataStore, DataBlob, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    CryptMode,
     FileInfo, ArchiveType, archive_type,
 };

-fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {

-    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
+    let blob = datastore.load_blob(backup_dir, &info.filename)?;

-    let csum = openssl::sha::sha256(blob.raw_data());
+    let raw_size = blob.raw_size();
     if raw_size != info.size {
         bail!("wrong size ({} != {})", info.size, raw_size);
     }

+    let csum = openssl::sha::sha256(blob.raw_data());
     if csum != info.csum {
         bail!("wrong index checksum");
     }

-    blob.verify_crc()?;
-
-    let magic = blob.magic();
-
-    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
-        return Ok(());
-    }
-
-    blob.decode(None)?;
-
-    Ok(())
+    match blob.crypt_mode()? {
+        CryptMode::Encrypt => Ok(()),
+        CryptMode::None => {
+            // digest already verified above
+            blob.decode(None, None)?;
+            Ok(())
+        },
+        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
+    }
 }
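
verify_blob checks the cheap invariants first (raw size, then the SHA-256 over the raw data) and only decodes as the last step; encrypted blobs deliberately return Ok(()) early, since they cannot be decoded without the key and the checksum already covers their integrity. A rough std-only sketch of that ordering (the hash is only a stand-in for SHA-256, names are illustrative):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn checksum(data: &[u8]) -> u64 {
        // stand-in for the real SHA-256 digest
        let mut h = DefaultHasher::new();
        data.hash(&mut h);
        h.finish()
    }

    fn verify_blob_like(raw: &[u8], expected_size: u64, expected_sum: u64, encrypted: bool) -> Result<(), String> {
        // 1. cheapest test first: the raw size
        if raw.len() as u64 != expected_size {
            return Err(format!("wrong size ({} != {})", expected_size, raw.len()));
        }
        // 2. checksum over the raw (still encoded) data
        if checksum(raw) != expected_sum {
            return Err("wrong index checksum".into());
        }
        // 3. encrypted blobs cannot be decoded without the key; the checksum
        //    above already proves integrity, so verification stops here
        if encrypted {
            return Ok(());
        }
        // 4. plain blobs get a full decode as the final, most expensive step
        //    (the decode itself is elided in this sketch)
        Ok(())
    }

    fn main() {
        let data = b"blob payload";
        assert!(verify_blob_like(data, data.len() as u64, checksum(data), false).is_ok());
        assert!(verify_blob_like(data, 0, checksum(data), false).is_err());
    }
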

+fn rename_corrupted_chunk(
+    datastore: Arc<DataStore>,
+    digest: &[u8;32],
+    worker: Arc<WorkerTask>,
+) {
+    let (path, digest_str) = datastore.chunk_path(digest);
+
+    let mut counter = 0;
+    let mut new_path = path.clone();
+    loop {
+        new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+        if new_path.exists() && counter < 9 { counter += 1; } else { break; }
+    }
+
+    match std::fs::rename(&path, &new_path) {
+        Ok(_) => {
+            worker.log(format!("corrupted chunk renamed to {:?}", &new_path));
+        },
+        Err(err) => {
+            match err.kind() {
+                std::io::ErrorKind::NotFound => { /* ignored */ },
+                _ => worker.log(format!("could not rename corrupted chunk {:?} - {}", &path, err))
+            }
+        }
+    };
+}
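
The rename loop probes <digest>.0.bad through <digest>.9.bad and reuses .9.bad once all ten names are taken, so repeated verification runs never grow the chunk directory without bound. A small std-only sketch of the name selection:

    use std::path::PathBuf;

    fn bad_chunk_name(chunk_path: &PathBuf, digest_str: &str) -> PathBuf {
        let mut counter = 0;
        let mut new_path = chunk_path.clone();
        loop {
            // replaces the last path component with "<digest>.<counter>.bad"
            new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
            // stop at .9.bad even if it exists, overwriting the oldest candidate
            if new_path.exists() && counter < 9 { counter += 1; } else { break; }
        }
        new_path
    }

    fn main() {
        let p = bad_chunk_name(&PathBuf::from("/tmp/chunk"), "0123abcd");
        println!("{:?}", p); // e.g. "/tmp/0123abcd.0.bad" on a clean directory
    }
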

+// We use a separate thread to read/load chunks, so that we can do
+// load and verify in parallel to increase performance.
+fn chunk_reader_thread(
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    errors: Arc<AtomicUsize>,
+    worker: Arc<WorkerTask>,
+) -> std::sync::mpsc::Receiver<(DataBlob, [u8;32], u64)> {
+
+    let (sender, receiver) = std::sync::mpsc::sync_channel(3); // buffer up to 3 chunks
+
+    std::thread::spawn(move|| {
+        for pos in 0..index.index_count() {
+            let info = index.chunk_info(pos).unwrap();
+            let size = info.range.end - info.range.start;
+
+            if verified_chunks.lock().unwrap().contains(&info.digest) {
+                continue; // already verified
+            }
+
+            if corrupt_chunks.lock().unwrap().contains(&info.digest) {
+                let digest_str = proxmox::tools::digest_to_hex(&info.digest);
+                worker.log(format!("chunk {} was marked as corrupt", digest_str));
+                errors.fetch_add(1, Ordering::SeqCst);
+                continue;
+            }
+
+            match datastore.load_chunk(&info.digest) {
+                Err(err) => {
+                    corrupt_chunks.lock().unwrap().insert(info.digest);
+                    worker.log(format!("can't verify chunk, load failed - {}", err));
+                    errors.fetch_add(1, Ordering::SeqCst);
+                    rename_corrupted_chunk(datastore.clone(), &info.digest, worker.clone());
+                    continue;
+                }
+                Ok(chunk) => {
+                    if sender.send((chunk, info.digest, size)).is_err() {
+                        break; // receiver gone - simply stop
+                    }
+                }
+            }
+        }
+    });
+
+    receiver
+}
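
sync_channel(3) gives the reader thread at most three loaded-but-unverified chunks of headroom; when the verifier falls behind, send blocks and the reader stops pulling data from disk, which keeps memory use bounded. A generic std-only sketch of the same producer/consumer pattern:

    use std::sync::mpsc::sync_channel;
    use std::thread;

    fn main() {
        let (sender, receiver) = sync_channel::<u32>(3); // buffer up to 3 items

        let producer = thread::spawn(move || {
            for i in 0..10 {
                // blocks once 3 items are queued, i.e. built-in backpressure
                if sender.send(i).is_err() {
                    break; // receiver gone - simply stop
                }
            }
        });

        for item in receiver {
            // pretend to do expensive verification here
            println!("verified {}", item);
        }
        producer.join().unwrap();
    }
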

 fn verify_index_chunks(
-    datastore: &DataStore,
-    index: Box<dyn IndexFile>,
-    worker: &WorkerTask,
+    datastore: Arc<DataStore>,
+    index: Box<dyn IndexFile + Send>,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    crypt_mode: CryptMode,
+    worker: Arc<WorkerTask>,
 ) -> Result<(), Error> {

-    for pos in 0..index.index_count() {
+    let errors = Arc::new(AtomicUsize::new(0));
+
+    let start_time = Instant::now();
+
+    let chunk_channel = chunk_reader_thread(
+        datastore.clone(),
+        index,
+        verified_chunks.clone(),
+        corrupt_chunks.clone(),
+        errors.clone(),
+        worker.clone(),
+    );
+
+    let mut read_bytes = 0;
+    let mut decoded_bytes = 0;
+
+    loop {

         worker.fail_on_abort()?;
         crate::tools::fail_on_shutdown()?;

-        let info = index.chunk_info(pos).unwrap();
-        let size = info.range.end - info.range.start;
-        datastore.verify_stored_chunk(&info.digest, size)?;
+        let (chunk, digest, size) = match chunk_channel.recv() {
+            Ok(tuple) => tuple,
+            Err(std::sync::mpsc::RecvError) => break,
+        };
+
+        read_bytes += chunk.raw_size();
+        decoded_bytes += size;
+
+        let chunk_crypt_mode = match chunk.crypt_mode() {
+            Err(err) => {
+                corrupt_chunks.lock().unwrap().insert(digest);
+                worker.log(format!("can't verify chunk, unknown CryptMode - {}", err));
+                errors.fetch_add(1, Ordering::SeqCst);
+                continue;
+            },
+            Ok(mode) => mode,
+        };
+
+        if chunk_crypt_mode != crypt_mode {
+            worker.log(format!(
+                "chunk CryptMode {:?} does not match index CryptMode {:?}",
+                chunk_crypt_mode,
+                crypt_mode
+            ));
+            errors.fetch_add(1, Ordering::SeqCst);
+        }
+
+        if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
+            corrupt_chunks.lock().unwrap().insert(digest);
+            worker.log(format!("{}", err));
+            errors.fetch_add(1, Ordering::SeqCst);
+            rename_corrupted_chunk(datastore.clone(), &digest, worker.clone());
+        } else {
+            verified_chunks.lock().unwrap().insert(digest);
+        }
     }

+    let elapsed = start_time.elapsed().as_secs_f64();
+
+    let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
+    let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
+
+    let read_speed = read_bytes_mib/elapsed;
+    let decode_speed = decoded_bytes_mib/elapsed;
+
+    let error_count = errors.load(Ordering::SeqCst);
+
+    worker.log(format!("  verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
+        read_bytes_mib, decoded_bytes_mib, elapsed, read_speed, decode_speed, error_count));
+
+    if errors.load(Ordering::SeqCst) > 0 {
+        bail!("chunks could not be verified");
+    }
+
     Ok(())
 }
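
Because deduplicated snapshots share most of their chunks, verify keeps two datastore-wide sets: digests that already passed and digests known to be corrupt. Every later index skips members of either set, so each chunk is read and hashed at most once per verification task. A std-only sketch of that caching (the actual read and digest check is a placeholder):

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    fn verify_once(
        digest: [u8; 32],
        verified: &Arc<Mutex<HashSet<[u8; 32]>>>,
        corrupt: &Arc<Mutex<HashSet<[u8; 32]>>>,
    ) -> bool {
        if verified.lock().unwrap().contains(&digest) {
            return true; // already verified via another snapshot
        }
        if corrupt.lock().unwrap().contains(&digest) {
            return false; // known bad - count an error, but don't re-read it
        }
        let ok = true; // placeholder for the real load + digest check
        if ok {
            verified.lock().unwrap().insert(digest);
        } else {
            corrupt.lock().unwrap().insert(digest);
        }
        ok
    }

    fn main() {
        let verified = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16)));
        let corrupt = Arc::new(Mutex::new(HashSet::with_capacity(64)));
        assert!(verify_once([0u8; 32], &verified, &corrupt));
        assert!(verify_once([0u8; 32], &verified, &corrupt)); // second call hits the cache
    }
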

-fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_fixed_index(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
+) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
     path.push(&info.filename);
@@ -68,10 +225,18 @@ fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &File
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }

-fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo, worker: &WorkerTask) -> Result<(), Error> {
+fn verify_dynamic_index(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
+) -> Result<(), Error> {

     let mut path = backup_dir.relative_path();
     path.push(&info.filename);

@@ -86,7 +251,7 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
         bail!("wrong index checksum");
     }

-    verify_index_chunks(datastore, Box::new(index), worker)
+    verify_index_chunks(datastore, Box::new(index), verified_chunks, corrupt_chunks, info.chunk_crypt_mode(), worker)
 }

 /// Verify a single backup snapshot
@@ -98,9 +263,15 @@ fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &Fi
 /// - Ok(true) if verify is successful
 /// - Ok(false) if there were verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_dir(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>
+) -> Result<bool, Error> {

-    let manifest = match datastore.load_manifest(&backup_dir) {
+    let mut manifest = match datastore.load_manifest(&backup_dir) {
         Ok((manifest, _)) => manifest,
         Err(err) => {
             worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
@@ -112,24 +283,53 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:

     let mut error_count = 0;

+    let mut verify_result = "ok";
     for info in manifest.files() {
         let result = proxmox::try_block!({
             worker.log(format!("  check {}", info.filename));
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info, worker),
-                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info, worker),
-                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+                ArchiveType::FixedIndex =>
+                    verify_fixed_index(
+                        datastore.clone(),
+                        &backup_dir,
+                        info,
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
+                    ),
+                ArchiveType::DynamicIndex =>
+                    verify_dynamic_index(
+                        datastore.clone(),
+                        &backup_dir,
+                        info,
+                        verified_chunks.clone(),
+                        corrupt_chunks.clone(),
+                        worker.clone(),
+                    ),
+                ArchiveType::Blob => verify_blob(datastore.clone(), &backup_dir, info),
             }
         });

+        worker.fail_on_abort()?;
+        crate::tools::fail_on_shutdown()?;
+
         if let Err(err) = result {
             worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
             error_count += 1;
+            verify_result = "failed";
         }
     }

+    let verify_state = SnapshotVerifyState {
+        state: verify_result.to_string(),
+        upid: worker.upid().clone(),
+    };
+    manifest.unprotected["verify_state"] = serde_json::to_value(verify_state)?;
+    datastore.store_manifest(&backup_dir, serde_json::to_value(manifest)?)
+        .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
+
     Ok(error_count == 0)
 }
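
The verify result is persisted into the manifest's unprotected section, so the UI can show per-snapshot state without re-reading any chunks. A sketch of the resulting JSON shape (field names as used above; the UPID string is illustrative):

    use serde_json::json;

    fn main() {
        let mut manifest = json!({ "files": [], "unprotected": {} });
        manifest["unprotected"]["verify_state"] = json!({
            "state": "ok", // or "failed"
            "upid": "UPID:pbs:0000ABCD:...:verify:store:root@pam:", // illustrative
        });
        println!("{}", serde_json::to_string_pretty(&manifest).unwrap());
    }
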
@@ -138,31 +338,45 @@ pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker:
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_group(
+    datastore: Arc<DataStore>,
+    group: &BackupGroup,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    progress: Option<(usize, usize)>, // (done, snapshot_count)
+    worker: Arc<WorkerTask>,
+) -> Result<(usize, Vec<String>), Error> {

+    let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
-            return Ok(false);
+            return Ok((0, errors));
         }
     };

     worker.log(format!("verify group {}:{}", datastore.name(), group));

-    let mut error_count = 0;
+    let (done, snapshot_count) = progress.unwrap_or((0, list.len()));

+    let mut count = 0;
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
-        if !verify_backup_dir(datastore, &info.backup_dir, worker)? {
-            error_count += 1;
+        count += 1;
+        if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())? {
+            errors.push(info.backup_dir.to_string());
+        }
+        if snapshot_count != 0 {
+            let pos = done + count;
+            let percentage = ((pos as f64) * 100.0)/(snapshot_count as f64);
+            worker.log(format!("percentage done: {:.2}% ({} of {} snapshots)", percentage, pos, snapshot_count));
         }
     }

-    Ok(error_count == 0)
+    Ok((count, errors))
 }

 /// Verify all backups inside a datastore
@@ -170,27 +384,52 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {

-    let list = match BackupGroup::list_groups(&datastore.base_path()) {
-        Ok(list) => list,
+    let mut errors = Vec::new();
+
+    let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
+        Ok(list) => list
+            .into_iter()
+            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
-            return Ok(false);
+            return Ok(errors);
         }
     };

-    worker.log(format!("verify datastore {}", datastore.name()));
+    list.sort_unstable();

-    let mut error_count = 0;
-    for group in list {
-        if !verify_backup_group(datastore, &group, worker)? {
-            error_count += 1;
-        }
+    let mut snapshot_count = 0;
+    for group in list.iter() {
+        snapshot_count += group.list_backups(&datastore.base_path())?.len();
     }

-    Ok(error_count == 0)
+    // start with 16384 chunks (up to 65GB)
+    let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+
+    // start with 64 chunks since we assume there are few corrupt ones
+    let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+    worker.log(format!("verify datastore {} ({} snapshots)", datastore.name(), snapshot_count));
+
+    let mut done = 0;
+    for group in list {
+        let (count, mut group_errors) = verify_backup_group(
+            datastore.clone(),
+            &group,
+            verified_chunks.clone(),
+            corrupt_chunks.clone(),
+            Some((done, snapshot_count)),
+            worker.clone(),
+        )?;
+        errors.append(&mut group_errors);
+
+        done += count;
+    }
+
+    Ok(errors)
 }
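
Callers now get the list of failed snapshot paths back instead of a single boolean, which lets a verification task both log every failed snapshot and still end in an error state. A minimal sketch of how a worker might consume the result (function and message names are illustrative):

    fn finish_verify_task(failed_dirs: Vec<String>) -> Result<(), String> {
        if failed_dirs.is_empty() {
            return Ok(());
        }
        println!("Failed to verify following snapshots:");
        for dir in &failed_dirs {
            println!("\t{}", dir);
        }
        Err("verification failed - please check the log for details".into())
    }

    fn main() {
        assert!(finish_verify_task(vec![]).is_ok());
        assert!(finish_verify_task(vec!["vm/100/2020-07-20T10:00:00Z".into()]).is_err());
    }
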

@@ -37,6 +37,7 @@ async fn run() -> Result<(), Error> {
     config::update_self_signed_cert(false)?;

     proxmox_backup::rrd::create_rrdb_dir()?;
+    proxmox_backup::config::jobstate::create_jobstate_dir()?;

     if let Err(err) = generate_auth_key() {
         bail!("unable to generate auth key - {}", err);

@@ -1,12 +1,14 @@
 use std::collections::{HashSet, HashMap};
-use std::io::{self, Write, Seek, SeekFrom};
+use std::convert::TryFrom;
+use std::io::{self, Read, Write, Seek, SeekFrom};
+use std::os::unix::io::{FromRawFd, RawFd};
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
 use std::task::Context;

 use anyhow::{bail, format_err, Error};
-use chrono::{Local, DateTime, Utc, TimeZone};
+use chrono::{Local, LocalResult, DateTime, Utc, TimeZone};
 use futures::future::FutureExt;
 use futures::stream::{StreamExt, TryStreamExt};
 use serde_json::{json, Value};
@@ -23,11 +25,12 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use proxmox_backup::tools;
 use proxmox_backup::api2::types::*;
+use proxmox_backup::api2::version;
 use proxmox_backup::client::*;
 use proxmox_backup::pxar::catalog::*;
 use proxmox_backup::backup::{
     archive_type,
-    load_and_decrypt_key,
+    decrypt_key,
     verify_chunk_size,
     ArchiveType,
     AsyncReadChunk,
@@ -35,11 +38,12 @@ use proxmox_backup::backup::{
     BackupGroup,
     BackupManifest,
     BufferedDynamicReader,
+    CATALOG_NAME,
     CatalogReader,
     CatalogWriter,
-    CATALOG_NAME,
     ChunkStream,
     CryptConfig,
     CryptMode,
+    DataBlob,
     DynamicIndexReader,
     FixedChunkStream,
@@ -65,9 +69,9 @@ pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
     "Path to encryption key. All data will be encrypted using this key.")
     .schema();

-pub const ENCRYPTION_SCHEMA: Schema = BooleanSchema::new(
-    "Explicitly enable or disable encryption. \
-    (Allows disabling encryption when a default key file is present.)")
+pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
+    "Pass an encryption key via an already opened file descriptor.")
+    .minimum(0)
     .schema();

 const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
@@ -180,7 +184,7 @@ pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<
     result
 }

-fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
+fn connect(server: &str, userid: &Userid) -> Result<HttpClient, Error> {

     let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();

@@ -253,7 +257,11 @@ pub async fn api_datastore_latest_snapshot(

     list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));

-    let backup_time = Utc.timestamp(list[0].backup_time, 0);
+    let backup_time = match Utc.timestamp_opt(list[0].backup_time, 0) {
+        LocalResult::Single(time) => time,
+        _ => bail!("last snapshot of backup group {:?} has invalid timestamp {}.",
+            group.group_path(), list[0].backup_time),
+    };

     Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
 }
@@ -270,6 +278,8 @@ async fn backup_directory<P: AsRef<Path>>(
     catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
     exclude_pattern: Vec<MatchEntry>,
     entries_max: usize,
+    compress: bool,
+    encrypt: bool,
 ) -> Result<BackupStats, Error> {

     let pxar_stream = PxarBackupStream::open(
@@ -296,7 +306,7 @@ async fn backup_directory<P: AsRef<Path>>(
     });

     let stats = client
-        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None)
+        .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
         .await?;

     Ok(stats)
@@ -309,6 +319,8 @@ async fn backup_image<P: AsRef<Path>>(
     archive_name: &str,
     image_size: u64,
     chunk_size: Option<usize>,
+    compress: bool,
+    encrypt: bool,
     _verbose: bool,
 ) -> Result<BackupStats, Error> {

@@ -322,7 +334,7 @@ async fn backup_image<P: AsRef<Path>>(
     let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));

     let stats = client
-        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size))
+        .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
         .await?;

     Ok(stats)
@@ -365,7 +377,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {

     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };

@@ -436,7 +448,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };

@@ -545,6 +557,56 @@ fn api_logout(param: Value) -> Result<Value, Error> {
     Ok(Value::Null)
 }

+#[api(
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Show client and optional server version
+async fn api_version(param: Value) -> Result<(), Error> {
+
+    let output_format = get_output_format(&param);
+
+    let mut version_info = json!({
+        "client": {
+            "version": version::PROXMOX_PKG_VERSION,
+            "release": version::PROXMOX_PKG_RELEASE,
+            "repoid": version::PROXMOX_PKG_REPOID,
+        }
+    });
+
+    let repo = extract_repository_from_value(&param);
+    if let Ok(repo) = repo {
+        let client = connect(repo.host(), repo.user())?;
+
+        match client.get("api2/json/version", None).await {
+            Ok(mut result) => version_info["server"] = result["data"].take(),
+            Err(e) => eprintln!("could not connect to server - {}", e),
+        }
+    }
+    if output_format == "text" {
+        println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
+        if let Some(server) = version_info["server"].as_object() {
+            let server_version = server["version"].as_str().unwrap();
+            let server_release = server["release"].as_str().unwrap();
+            println!("server version: {}.{}", server_version, server_release);
+        }
+    } else {
+        format_and_print_result(&version_info, &output_format);
+    }
+
+    Ok(())
+}
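
The command degrades gracefully: when no server is reachable, the "server" half is simply omitted from the result. A sketch of the two possible JSON shapes (serde_json; all version values illustrative):

    use serde_json::json;

    fn main() {
        // client-only result (server unreachable)
        let offline = json!({ "client": { "version": "0.8", "release": "1", "repoid": "abc123" } });
        // combined result after a successful api2/json/version query
        let online = json!({
            "client": { "version": "0.8", "release": "1", "repoid": "abc123" },
            "server": { "version": "0.8", "release": "1", "repoid": "def456" },
        });
        println!("{}\n{}", offline, online);
    }
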

 #[api(
     input: {
@@ -632,7 +694,8 @@ async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
 }

 fn spawn_catalog_upload(
-    client: Arc<BackupWriter>
+    client: Arc<BackupWriter>,
+    encrypt: bool,
 ) -> Result<
     (
         Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
@@ -650,7 +713,7 @@ fn spawn_catalog_upload(

     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
+            .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
             .await;

         if let Err(ref err) = catalog_upload_result {
@@ -664,34 +727,71 @@ fn spawn_catalog_upload(
     Ok((catalog, catalog_result_rx))
 }

-fn keyfile_parameters(param: &Value) -> Result<Option<PathBuf>, Error> {
-    Ok(match (param.get("keyfile"), param.get("encryption")) {
+fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
+    let keyfile = match param.get("keyfile") {
+        Some(Value::String(keyfile)) => Some(keyfile),
+        Some(_) => bail!("bad --keyfile parameter type"),
+        None => None,
+    };
+
+    let key_fd = match param.get("keyfd") {
+        Some(Value::Number(key_fd)) => Some(
+            RawFd::try_from(key_fd
+                .as_i64()
+                .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+            )
+            .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+        ),
+        Some(_) => bail!("bad --keyfd parameter type"),
+        None => None,
+    };
+
+    let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
+        Some(mode) => Some(serde_json::from_value(mode.clone())?),
+        None => None,
+    };
+
+    let keydata = match (keyfile, key_fd) {
+        (None, None) => None,
+        (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+        (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
+        (None, Some(fd)) => {
+            let input = unsafe { std::fs::File::from_raw_fd(fd) };
+            let mut data = Vec::new();
+            let _len: usize = { input }.read_to_end(&mut data)
+                .map_err(|err| {
+                    format_err!("error reading encryption key from fd {}: {}", fd, err)
+                })?;
+            Some(data)
+        }
+    };
+
+    Ok(match (keydata, crypt_mode) {
         // no parameters:
-        (None, None) => key::optional_default_key_path()?,
+        (None, None) => match key::read_optional_default_encryption_key()? {
+            Some(key) => (Some(key), CryptMode::Encrypt),
+            None => (None, CryptMode::None),
+        },

-        // just --encryption=false
-        (None, Some(Value::Bool(false))) => None,
+        // just --crypt-mode=none
+        (None, Some(CryptMode::None)) => (None, CryptMode::None),

-        // just --encryption=true
-        (None, Some(Value::Bool(true))) => match key::optional_default_key_path()? {
-            None => bail!("--encryption=false without --keyfile and no default key file available"),
-            Some(path) => Some(path),
+        // just --crypt-mode other than none
+        (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
+            None => bail!("--crypt-mode without --keyfile and no default key file available"),
+            Some(key) => (Some(key), crypt_mode),
         }

         // just --keyfile
-        (Some(Value::String(keyfile)), None) => Some(PathBuf::from(keyfile)),
+        (Some(key), None) => (Some(key), CryptMode::Encrypt),

-        // --keyfile and --encryption=false
-        (Some(Value::String(_)), Some(Value::Bool(false))) => {
-            bail!("--keyfile and --encryption=false are mutually exclusive");
+        // --keyfile and --crypt-mode=none
+        (Some(_), Some(CryptMode::None)) => {
+            bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
         }

-        // --keyfile and --encryption=true
-        (Some(Value::String(keyfile)), Some(Value::Bool(true))) => Some(PathBuf::from(keyfile)),
-
-        // wrong value types:
-        (Some(_), _) => bail!("bad --keyfile parameter"),
-        (_, Some(_)) => bail!("bad --encryption parameter"),
+        // --keyfile and --crypt-mode other than none
+        (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
     })
 }
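
The final match encodes a small decision table; spelled out (assuming a default key file may or may not exist):

    key source (keyfile/keyfd) | --crypt-mode | result
    ---------------------------+--------------+------------------------------------------------
    none                       | not given    | default key if present (Encrypt), else no key
    none                       | none         | no key, CryptMode::None
    none                       | sign-only or | default key required, error if none available
                               | encrypt      |
    given                      | not given    | given key, CryptMode::Encrypt
    given                      | none         | error (mutually exclusive)
    given                      | sign-only or | given key, requested mode
                               | encrypt      |
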

@@ -721,8 +821,12 @@ fn keyfile_parameters(param: &Value) -> Result<Option<PathBuf>, Error> {
             keyfile: {
                 schema: KEYFILE_SCHEMA,
                 optional: true,
             },
-            encryption: {
-                schema: ENCRYPTION_SCHEMA,
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
                 optional: true,
             },
             "skip-lost-and-found": {
@@ -794,7 +898,7 @@ async fn create_backup(
         verify_chunk_size(size)?;
     }

-    let keyfile = keyfile_parameters(&param)?;
+    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

     let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());

@@ -835,12 +939,18 @@ async fn create_backup(
     }

     let mut upload_list = vec![];
+    let mut target_set = HashSet::new();

     for backupspec in backupspec_list {
         let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
         let filename = &spec.config_string;
         let target = &spec.archive_name;

+        if target_set.contains(target) {
+            bail!("got target twice: '{}'", target);
+        }
+        target_set.insert(target.to_string());
+
         use std::os::unix::fs::FileTypeExt;

         let metadata = std::fs::metadata(filename)
@@ -880,7 +990,15 @@ async fn create_backup(
         }
     }

-    let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
+    let backup_time = match backup_time_opt {
+        Some(timestamp) => {
+            match Utc.timestamp_opt(timestamp, 0) {
+                LocalResult::Single(time) => time,
+                _ => bail!("Invalid backup-time parameter: {}", timestamp),
+            }
+        },
+        _ => Utc::now(),
+    };
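
Utc.timestamp_opt returns a LocalResult, which is Single for a representable instant and None (or Ambiguous, for time zones with overlaps) otherwise; matching on it replaces the old panicking Utc.timestamp call. A small chrono sketch of the same pattern:

    use chrono::{DateTime, LocalResult, TimeZone, Utc};

    fn checked_time(ts: i64) -> Result<DateTime<Utc>, String> {
        match Utc.timestamp_opt(ts, 0) {
            LocalResult::Single(time) => Ok(time),
            _ => Err(format!("Invalid backup-time parameter: {}", ts)),
        }
    }

    fn main() {
        assert!(checked_time(1_595_000_000).is_ok());
        assert!(checked_time(i64::MAX).is_err()); // out of chrono's representable range
    }
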

     let client = connect(repo.host(), repo.user())?;
     record_repository(&repo);
@@ -893,27 +1011,25 @@ async fn create_backup(

     println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));

-    let (crypt_config, rsa_encrypted_key) = match keyfile {
+    let (crypt_config, rsa_encrypted_key) = match keydata {
         None => (None, None),
-        Some(path) => {
-            let (key, created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+        Some(key) => {
+            let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;

             let crypt_config = CryptConfig::new(key)?;

-            let path = master_pubkey_path()?;
-            if path.exists() {
-                let pem_data = file_get_contents(&path)?;
-                let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
-                let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
-                (Some(Arc::new(crypt_config)), Some(enc_key))
-            } else {
-                (Some(Arc::new(crypt_config)), None)
+            match key::find_master_pubkey()? {
+                Some(ref path) if path.exists() => {
+                    let pem_data = file_get_contents(path)?;
+                    let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+                    let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+                    (Some(Arc::new(crypt_config)), Some(enc_key))
+                }
+                _ => (Some(Arc::new(crypt_config)), None),
             }
         }
     };

-    let is_encrypted = Some(crypt_config.is_some());
-
     let client = BackupWriter::start(
         client,
         crypt_config.clone(),
@@ -922,6 +1038,7 @@ async fn create_backup(
         &backup_id,
         backup_time,
         verbose,
+        false
     ).await?;

     let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
@@ -930,7 +1047,7 @@ async fn create_backup(
         None
     };

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
     let mut manifest = BackupManifest::new(snapshot);

     let mut catalog = None;
@@ -939,29 +1056,29 @@ async fn create_backup(
     for (backup_type, filename, target, size) in upload_list {
         match backup_type {
             BackupSpecificationType::CONFIG => {
-                println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+                println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, true, Some(true))
+                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
             BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
-                println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
+                println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, true, Some(true))
+                    .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
             BackupSpecificationType::PXAR => {
                 // start catalog upload on first use
                 if catalog.is_none() {
-                    let (cat, res) = spawn_catalog_upload(client.clone())?;
+                    let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
                     catalog = Some(cat);
                     catalog_result_tx = Some(res);
                 }
                 let catalog = catalog.as_ref().unwrap();

-                println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+                println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
                 catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
                 let stats = backup_directory(
                     &client,
@@ -975,8 +1092,10 @@ async fn create_backup(
                     catalog.clone(),
                     pattern_list.clone(),
                     entries_max as usize,
+                    true,
+                    crypt_mode == CryptMode::Encrypt,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
                 catalog.lock().unwrap().end_directory()?;
             }
             BackupSpecificationType::IMAGE => {
@@ -988,9 +1107,11 @@ async fn create_backup(
                     &target,
                     size,
                     chunk_size_opt,
+                    true,
+                    crypt_mode == CryptMode::Encrypt,
                     verbose,
                 ).await?;
-                manifest.add_file(target, stats.size, stats.csum, is_encrypted)?;
+                manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
             }
         }
     }
@@ -1007,17 +1128,17 @@ async fn create_backup(

     if let Some(catalog_result_rx) = catalog_result_tx {
         let stats = catalog_result_rx.await??;
-        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, is_encrypted)?;
+        manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
     }
 }

 if let Some(rsa_encrypted_key) = rsa_encrypted_key {
-    let target = "rsa-encrypted.key";
+    let target = "rsa-encrypted.key.blob";
     println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
     let stats = client
-        .upload_blob_from_data(rsa_encrypted_key, target, false, None)
+        .upload_blob_from_data(rsa_encrypted_key, target, false, false)
         .await?;
-    manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, is_encrypted)?;
+    manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;

     // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
     /*
@@ -1028,14 +1149,15 @@ async fn create_backup(
         println!("TEST {} {:?}", len, buffer2);
     */
 }

-    // create manifest (index.json)
-    let manifest = manifest.into_json();
+    // manifests are never encrypted, but include a signature
+    let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
+        .map_err(|err| format_err!("unable to format manifest - {}", err))?;

-    println!("Upload index.json to '{:?}'", repo);
-    let manifest = serde_json::to_string_pretty(&manifest)?.into();
-
+    if verbose { println!("Upload index.json to '{}'", repo) };
     client
-        .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, Some(true))
+        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
         .await?;

     client.finish().await?;
@@ -1073,6 +1195,7 @@ fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<Str
 async fn dump_image<W: Write>(
     client: Arc<BackupReader>,
     crypt_config: Option<Arc<CryptConfig>>,
+    crypt_mode: CryptMode,
     index: FixedIndexReader,
     mut writer: W,
     verbose: bool,
@@ -1080,7 +1203,7 @@ async fn dump_image<W: Write>(

     let most_used = index.find_most_used_chunks(8);

-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);

     // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
     // and thus slows down reading. Instead, directly use RemoteChunkReader
@@ -1159,8 +1282,12 @@ We do not extract '.pxar' archives when writing to standard output.
             keyfile: {
                 schema: KEYFILE_SCHEMA,
                 optional: true,
             },
-            encryption: {
-                schema: ENCRYPTION_SCHEMA,
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
                 optional: true,
             },
         }
@@ -1193,12 +1320,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let target = tools::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };

-    let keyfile = keyfile_parameters(&param)?;
+    let (keydata, _crypt_mode) = keyfile_parameters(&param)?;

-    let crypt_config = match keyfile {
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+        Some(key) => {
+            let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
             Some(Arc::new(CryptConfig::new(key)?))
         }
     };
@@ -1213,22 +1340,26 @@ async fn restore(param: Value) -> Result<Value, Error> {
         true,
     ).await?;

-    let manifest = client.download_manifest().await?;
+    let (manifest, backup_index_data) = client.download_manifest().await?;

     let (archive_name, archive_type) = parse_archive_type(archive_name);

     if archive_name == MANIFEST_BLOB_NAME {
-        let backup_index_data = manifest.into_json().to_string();
         if let Some(target) = target {
-            replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
+            replace_file(target, &backup_index_data, CreateOptions::new())?;
         } else {
             let stdout = std::io::stdout();
             let mut writer = stdout.lock();
-            writer.write_all(backup_index_data.as_bytes())
+            writer.write_all(&backup_index_data)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }

-    } else if archive_type == ArchiveType::Blob {
+        return Ok(Value::Null);
+    }
+
+    let file_info = manifest.lookup_file_info(&archive_name)?;
+
+    if archive_type == ArchiveType::Blob {

         let mut reader = client.download_blob(&manifest, &archive_name).await?;

@@ -1253,7 +1384,7 @@ async fn restore(param: Value) -> Result<Value, Error> {

     let most_used = index.find_most_used_chunks(8);

-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);

     let mut reader = BufferedDynamicReader::new(index, chunk_reader);

@@ -1262,6 +1393,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         pxar::decoder::Decoder::from_std(reader)?,
         Path::new(target),
         &[],
+        true,
         proxmox_backup::pxar::Flags::DEFAULT,
         allow_existing_dirs,
         |path| {
@@ -1269,6 +1401,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
             println!("{:?}", path);
         }
     },
+    None,
 )
 .map_err(|err| format_err!("error extracting archive - {}", err))?;
 } else {
@@ -1298,7 +1431,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         .map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
     };

-    dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
+    dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
 }

 Ok(Value::Null)
@@ -1323,8 +1456,12 @@ async fn restore(param: Value) -> Result<Value, Error> {
             keyfile: {
                 schema: KEYFILE_SCHEMA,
                 optional: true,
             },
-            encryption: {
-                schema: ENCRYPTION_SCHEMA,
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
                 optional: true,
             },
         }
@@ -1341,12 +1478,12 @@ async fn upload_log(param: Value) -> Result<Value, Error> {

     let mut client = connect(repo.host(), repo.user())?;

-    let keyfile = keyfile_parameters(&param)?;
+    let (keydata, crypt_mode) = keyfile_parameters(&param)?;

-    let crypt_config = match keyfile {
+    let crypt_config = match keydata {
         None => None,
-        Some(path) => {
-            let (key, _created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+        Some(key) => {
+            let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
             let crypt_config = CryptConfig::new(key)?;
             Some(Arc::new(crypt_config))
         }
@@ -1354,7 +1491,11 @@ async fn upload_log(param: Value) -> Result<Value, Error> {

     let data = file_get_contents(logfile)?;

-    let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
+    // fixme: howto sign log?
+    let blob = match crypt_mode {
+        CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
+        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
+    };

     let raw_data = blob.into_inner();

@@ -1431,7 +1572,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {

     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: PruneListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };

@@ -1623,8 +1764,9 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
             if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                 (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
             {
-                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
-                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
+                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+                }
             }
         }
     }
@@ -1711,15 +1853,6 @@ fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<Stri
     result
 }

-fn master_pubkey_path() -> Result<PathBuf, Error> {
-    let base = BaseDirectories::with_prefix("proxmox-backup")?;
-
-    // usually $HOME/.config/proxmox-backup/master-public.pem
-    let path = base.place_config_file("master-public.pem")?;
-
-    Ok(path)
-}
-
 use proxmox_backup::client::RemoteChunkReader;
 /// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
 /// async use!
@@ -1746,7 +1879,6 @@ impl ReadAt for BufferedDynamicReadAt {
         buf: &'a mut [u8],
         offset: u64,
     ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
-        use std::io::Read;
         MaybeReady::Ready(tokio::task::block_in_place(move || {
             let mut reader = self.inner.lock().unwrap();
             reader.seek(SeekFrom::Start(offset))?;
@@ -1824,6 +1956,9 @@ fn main() {
     let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
         .completion_cb("repository", complete_repository);

+    let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
+        .completion_cb("repository", complete_repository);
+
     let cmd_def = CliCommandMap::new()
         .insert("backup", backup_cmd_def)
         .insert("upload-log", upload_log_cmd_def)
@@ -1841,6 +1976,7 @@ fn main() {
         .insert("mount", mount_cmd_def())
         .insert("catalog", catalog_mgmt_cli())
         .insert("task", task_mgmt_cli())
+        .insert("version", version_cmd_def)
         .insert("benchmark", benchmark_cmd_def);

     let rpcenv = CliEnvironment::new();

@@ -9,7 +9,7 @@ use proxmox_backup::tools;
 use proxmox_backup::config;
 use proxmox_backup::api2::{self, types::* };
 use proxmox_backup::client::*;
-use proxmox_backup::tools::ticket::*;
+use proxmox_backup::tools::ticket::Ticket;
 use proxmox_backup::auth_helpers::*;

 mod proxmox_backup_manager;
@@ -59,12 +59,13 @@ fn connect() -> Result<HttpClient, Error> {
         .verify_cert(false); // not required for connection to localhost

     let client = if uid.is_root() {
-        let ticket = assemble_rsa_ticket(private_auth_key(), "PBS", Some("root@pam"), None)?;
+        let ticket = Ticket::new("PBS", Userid::root_userid())?
+            .sign(private_auth_key(), None)?;
         options = options.password(Some(ticket));
-        HttpClient::new("localhost", "root@pam", options)?
+        HttpClient::new("localhost", Userid::root_userid(), options)?
     } else {
         options = options.ticket_cache(true).interactive(true);
-        HttpClient::new("localhost", "root@pam", options)?
+        HttpClient::new("localhost", Userid::root_userid(), options)?
     };

     Ok(client)
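
The old helper built the signed ticket in one call; the new Ticket type separates construction from signing. A toy model of the shape of that flow (std-only stand-in, not the real proxmox_backup::tools::ticket API; the "signature" here is obviously fake):

    struct Ticket { prefix: String, userid: String }

    impl Ticket {
        fn new(prefix: &str, userid: &str) -> Self {
            Ticket { prefix: prefix.into(), userid: userid.into() }
        }
        fn sign(&self, secret: &str) -> String {
            // real implementation: sign "<prefix>:<userid>:<time>" with the RSA auth key
            format!("{}:{}::{:08X}", self.prefix, self.userid, secret.len())
        }
    }

    fn main() {
        let ticket = Ticket::new("PBS", "root@pam").sign("private-auth-key");
        println!("{}", ticket); // passed as the password to the HTTP client
    }
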
@@ -127,7 +128,7 @@ async fn garbage_collection_status(param: Value) -> Result<Value, Error> {

     let mut result = client.get(&path, None).await?;
     let mut data = result["data"].take();
-    let schema = api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;
+    let schema = &api2::admin::datastore::API_RETURN_SCHEMA_GARBAGE_COLLECTION_STATUS;

     let options = default_table_format_options();

@@ -193,7 +194,7 @@ async fn task_list(param: Value) -> Result<Value, Error> {
     let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;

     let mut data = result["data"].take();
-    let schema = api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
+    let schema = &api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;

     let options = default_table_format_options()
         .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))

@@ -1,5 +1,5 @@
 use std::sync::Arc;
-use std::path::Path;
+use std::path::{Path, PathBuf};

 use anyhow::{bail, format_err, Error};
 use futures::*;
@@ -9,6 +9,7 @@ use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
 use proxmox::try_block;
 use proxmox::api::RpcEnvironmentType;

+use proxmox_backup::api2::types::Userid;
 use proxmox_backup::configdir;
 use proxmox_backup::buildcfg;
 use proxmox_backup::server;
@@ -17,13 +18,21 @@ use proxmox_backup::server::{ApiConfig, rest::*};
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::tools::disks::{ DiskManage, zfs_pool_stats };

-fn main() {
+use proxmox_backup::api2::pull::do_sync_job;
+
+fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();

-    if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
-        eprintln!("Error: {}", err);
-        std::process::exit(-1);
+    let backup_uid = proxmox_backup::backup::backup_user()?.uid;
+    let backup_gid = proxmox_backup::backup::backup_group()?.gid;
+    let running_uid = nix::unistd::Uid::effective();
+    let running_gid = nix::unistd::Gid::effective();
+
+    if running_uid != backup_uid || running_gid != backup_gid {
+        bail!("proxy not running as backup user or group (got uid {} gid {})", running_uid, running_gid);
     }
+
+    proxmox_backup::tools::runtime::main(run())
 }

 async fn run() -> Result<(), Error> {
@@ -40,19 +49,20 @@ async fn run() -> Result<(), Error> {
     let mut config = ApiConfig::new(
         buildcfg::JS_DIR, &proxmox_backup::api2::ROUTER, RpcEnvironmentType::PUBLIC)?;

     // add default dirs which includes jquery and bootstrap
     // my $base = '/usr/share/libpve-http-server-perl';
     // add_dirs($self->{dirs}, '/css/' => "$base/css/");
     // add_dirs($self->{dirs}, '/js/' => "$base/js/");
     // add_dirs($self->{dirs}, '/fonts/' => "$base/fonts/");
     config.add_alias("novnc", "/usr/share/novnc-pve");
     config.add_alias("extjs", "/usr/share/javascript/extjs");
     config.add_alias("fontawesome", "/usr/share/fonts-font-awesome");
     config.add_alias("xtermjs", "/usr/share/pve-xtermjs");
     config.add_alias("locale", "/usr/share/pbs-i18n");
     config.add_alias("widgettoolkit", "/usr/share/javascript/proxmox-widget-toolkit");
     config.add_alias("css", "/usr/share/javascript/proxmox-backup/css");
     config.add_alias("docs", "/usr/share/doc/proxmox-backup/html");

+    let mut indexpath = PathBuf::from(buildcfg::JS_DIR);
+    indexpath.push("index.hbs");
+    config.register_template("index", &indexpath)?;
+    config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
+
     let rest_server = RestServer::new(config);

     //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@@ -77,8 +87,6 @@ async fn run() -> Result<(), Error> {
         let acceptor = Arc::clone(&acceptor);
         async move {
             sock.set_nodelay(true).unwrap();
-            sock.set_send_buffer_size(1024*1024).unwrap();
-            sock.set_recv_buffer_size(1024*1024).unwrap();
             Ok(tokio_openssl::accept(&acceptor, sock)
                 .await
                 .ok() // handshake errors aren't fatal, so return None to filter
@@ -292,7 +300,8 @@ async fn schedule_datastore_garbage_collection() {
     };

     let next = match compute_next_event(&event, last, false) {
-        Ok(next) => next,
+        Ok(Some(next)) => next,
+        Ok(None) => continue,
         Err(err) => {
             eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
             continue;
@@ -313,7 +322,7 @@ async fn schedule_datastore_garbage_collection() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        "backup@pam",
+        Userid::backup_userid().clone(),
         false,
         move |worker| {
             worker.log(format!("starting garbage collection on store {}", store));
@@ -403,7 +412,8 @@ async fn schedule_datastore_prune() {
     };

     let next = match compute_next_event(&event, last, false) {
-        Ok(next) => next,
+        Ok(Some(next)) => next,
+        Ok(None) => continue,
         Err(err) => {
             eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
             continue;
@@ -424,7 +434,7 @@ async fn schedule_datastore_prune() {
     if let Err(err) = WorkerTask::new_thread(
         worker_type,
         Some(store.clone()),
-        "backup@pam",
+        Userid::backup_userid().clone(),
         false,
         move |worker| {
             worker.log(format!("Starting datastore prune on store \"{}\"", store));
@@ -450,7 +460,7 @@ async fn schedule_datastore_prune() {
             BackupDir::backup_time_to_string(info.backup_dir.backup_time())));

         if !keep {
-            datastore.remove_backup_dir(&info.backup_dir)?;
+            datastore.remove_backup_dir(&info.backup_dir, true)?;
         }
     }
 }
@@ -466,10 +476,7 @@ async fn schedule_datastore_prune() {
 async fn schedule_datastore_sync_jobs() {

     use proxmox_backup::{
-        backup::DataStore,
-        client::{ HttpClient, HttpClientOptions, BackupRepository, pull::pull_store },
-        server::{ WorkerTask },
-        config::{ sync::{self, SyncJobConfig}, remote::{self, Remote} },
+        config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
         tools::systemd::time::{ parse_calendar_event, compute_next_event },
     };

@@ -481,14 +488,6 @@ async fn schedule_datastore_sync_jobs() {
         Ok((config, _digest)) => config,
     };

-    let remote_config = match remote::config() {
-        Err(err) => {
-            eprintln!("unable to read remote config - {}", err);
-            return;
-        }
-        Ok((config, _digest)) => config,
-    };
-
     for (job_id, (_, job_config)) in config.sections {
         let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
             Ok(c) => c,
@@ -513,22 +512,17 @@ async fn schedule_datastore_sync_jobs() {

         let worker_type = "syncjob";

-        let last = match lookup_last_worker(worker_type, &job_id) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            },
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &job_id) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, job_id, err);
                 continue;
             }
         };

         let next = match compute_next_event(&event, last, false) {
-            Ok(next) => next,
+            Ok(Some(next)) => next,
+            Ok(None) => continue,
             Err(err) => {
                 eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
                 continue;
@@ -544,57 +538,15 @@ async fn schedule_datastore_sync_jobs() {
         };
         if next > now { continue; }

-        let job_id2 = job_id.clone();
-
-        let tgt_store = match DataStore::lookup_datastore(&job_config.store) {
-            Ok(datastore) => datastore,
-            Err(err) => {
-                eprintln!("lookup_datastore '{}' failed - {}", job_config.store, err);
-                continue;
-            }
+        let job = match Job::new(worker_type, &job_id) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
         };

-        let remote: Remote = match remote_config.lookup("remote", &job_config.remote) {
-            Ok(remote) => remote,
-            Err(err) => {
-                eprintln!("remote_config lookup failed: {}", err);
-                continue;
-            }
-        };
+        let userid = Userid::backup_userid().clone();

-        let username = String::from("backup@pam");
-
-        let delete = job_config.remove_vanished.unwrap_or(true);
-
-        if let Err(err) = WorkerTask::spawn(
-            worker_type,
-            Some(job_id.clone()),
-            &username.clone(),
-            false,
-            move |worker| async move {
-                worker.log(format!("Starting datastore sync job '{}'", job_id));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-                worker.log(format!("Sync datastore '{}' from '{}/{}'",
-                    job_config.store, job_config.remote, job_config.remote_store));
-
-                let options = HttpClientOptions::new()
-                    .password(Some(remote.password.clone()))
-                    .fingerprint(remote.fingerprint.clone());
-
-                let client = HttpClient::new(&remote.host, &remote.userid, options)?;
-                let _auth_info = client.login() // make sure we can auth
-                    .await
-                    .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
-
-                let src_repo = BackupRepository::new(Some(remote.userid), Some(remote.host), job_config.remote_store);
-
-                pull_store(&worker, &client, &src_repo, tgt_store, delete, username).await?;
-
-                Ok(())
-            }
-        ) {
-            eprintln!("unable to start datastore sync job {} - {}", job_id2, err);
+        if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+            eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
         }
     }
 }
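
All three schedulers now share the same loop shape: take the job's last run time, compute the next event from the calendar spec (where Ok(None) means the event can never fire again, e.g. a spec entirely in the past), and skip the job if that event is still in the future. A compact std-only sketch of the pattern (next_event is a stand-in for compute_next_event; timestamps are illustrative):

    fn next_event(_spec: &str, last_run: u64) -> Result<Option<u64>, String> {
        // stand-in for tools::systemd::time::compute_next_event
        Ok(Some(last_run + 3600)) // e.g. an "hourly" schedule
    }

    fn main() -> Result<(), String> {
        let now = 10_000u64;
        let last = 6_000u64; // e.g. jobstate::last_run_time(...)
        match next_event("hourly", last)? {
            None => {}                        // never fires again - skip job
            Some(next) if next > now => {}    // not due yet - skip job
            Some(_) => println!("start job"), // due - spawn the worker task
        }
        Ok(())
    }
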