Compare commits
506 Commits
Commit range: 4d86df04a0 … e8913fea12
Cargo.toml (21 lines changed)

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.10"
+version = "1.1.10"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -15,6 +15,7 @@ edition = "2018"
 license = "AGPL-3"
 description = "Proxmox Backup"
 homepage = "https://www.proxmox.com"
+build = "build.rs"
 
 exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
 
@@ -29,7 +30,11 @@ bitflags = "1.2.1"
 bytes = "1.0"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
+flate2 = "1.0"
 anyhow = "1.0"
+foreign-types = "0.3"
+thiserror = "1.0"
 futures = "0.3"
 h2 = { version = "0.3", features = [ "stream" ] }
 handlebars = "3.0"
@@ -48,11 +53,13 @@ percent-encoding = "2.1"
 pin-utils = "0.1.0"
 pin-project = "1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.11.5", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
-#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
+#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro" ] }
 proxmox-fuse = "0.1.1"
-pxar = { version = "0.9.0", features = [ "tokio-io" ] }
+proxmox-http = { version = "0.2.1", features = [ "client", "http-helpers", "websocket" ] }
+#proxmox-http = { version = "0.2.0", path = "../proxmox/proxmox-http", features = [ "client", "http-helpers", "websocket" ] }
+pxar = { version = "0.10.1", features = [ "tokio-io" ] }
 #pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
 rustyline = "7"
@@ -60,10 +67,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
+tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
-tokio-util = { version = "0.6", features = [ "codec" ] }
+tokio-util = { version = "0.6", features = [ "codec", "io" ] }
 tower-service = "0.3.0"
 udev = ">= 0.3, <0.5"
 url = "2.1"
@@ -75,6 +82,8 @@ zstd = { version = "0.4", features = [ "bindgen" ] }
 nom = "5.1"
 crossbeam-channel = "0.5"
 
+proxmox-acme-rs = "0.2.1"
+
 [features]
 default = []
 #valgrind = ["valgrind_request"]
Makefile (35 lines changed)

@@ -9,6 +9,7 @@ SUBDIRS := etc www docs
 # Binaries usable by users
 USR_BIN := \
     proxmox-backup-client \
+    proxmox-file-restore \
     pxar \
     proxmox-tape \
     pmtx \
@@ -25,6 +26,10 @@ SERVICE_BIN := \
     proxmox-backup-proxy \
     proxmox-daily-update
 
+# Single file restore daemon
+RESTORE_BIN := \
+    proxmox-restore-daemon
+
 ifeq ($(BUILD_MODE), release)
 CARGO_BUILD_ARGS += --release
 COMPILEDIR := target/release
@@ -39,7 +44,7 @@ endif
 CARGO ?= cargo
 
 COMPILED_BINS := \
-    $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
+    $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
 
 export DEB_VERSION DEB_VERSION_UPSTREAM
 
@@ -47,9 +52,12 @@ SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
 SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
 CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DEB=proxmox-backup-file-restore_${DEB_VERSION}_${ARCH}.deb
+RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
 DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
 
-DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
+    ${RESTORE_DEB} ${RESTORE_DBG_DEB}
 
 DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
 
@@ -74,7 +82,13 @@ doc:
 build:
     rm -rf build
     rm -f debian/control
-    debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
+    debcargo package \
+        --config debian/debcargo.toml \
+        --changelog-ready \
+        --no-overlay-write-back \
+        --directory build \
+        proxmox-backup \
+        $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
     sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
     cat build/debian/control.src build/debian/control.in > build/debian/control
     rm build/debian/control.in build/debian/control.src
@@ -117,8 +131,8 @@ clean:
     find . -name '*~' -exec rm {} ';'
 
 .PHONY: dinstall
-dinstall: ${DEBS}
-    dpkg -i ${DEBS}
+dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
+    dpkg -i $^
 
 # make sure we build binaries before docs
 docs: cargo-build
@@ -144,6 +158,9 @@ install: $(COMPILED_BINS)
     install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
     install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
     install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
+    install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
+    $(foreach i,$(RESTORE_BIN), \
+    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
     # install sg-tape-cmd as setuid binary
     install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
     $(foreach i,$(SERVICE_BIN), \
@@ -152,8 +169,10 @@ install: $(COMPILED_BINS)
     $(MAKE) -C docs install
 
 .PHONY: upload
-upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
+upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB}
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
-    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
-    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
+    tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} | \
+        ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
+    tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist buster
+    tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist buster
build.rs (new file, 24 lines)

@@ -0,0 +1,24 @@
+// build.rs
+use std::env;
+use std::process::Command;
+
+fn main() {
+    let repoid = match env::var("REPOID") {
+        Ok(repoid) => repoid,
+        Err(_) => {
+            match Command::new("git")
+                .args(&["rev-parse", "HEAD"])
+                .output()
+            {
+                Ok(output) => {
+                    String::from_utf8(output.stdout).unwrap()
+                }
+                Err(err) => {
+                    panic!("git rev-parse failed: {}", err);
+                }
+            }
+        }
+    };
+
+    println!("cargo:rustc-env=REPOID={}", repoid);
+}
debian/changelog (343 lines changed)

@@ -1,3 +1,346 @@
+rust-proxmox-backup (1.1.10-1) buster; urgency=medium
+
+  * ui: datastore list summary: catch and show errors per datastore
+
+  * ui: dashboard: task summary: add a 'close' tool to the header
+
+  * ensure that backups which are currently being restored or backed up to a
+    tape won't get pruned
+
+  * improve error handling when locking a tape drive for a backup job
+
+  * client/pull: log snapshots that are skipped because of creation time being
+    older than last sync time
+
+  * ui: datastore options: add remove button to drop a datastore from the
+    configuration, without removing any actual data
+
+  * ui: tape: drive selector: do not autoselect the drive
+
+  * ui: tape: backup job: use correct default value for pbsUserSelector
+
+  * fix #3433: disks: port over Proxmox VE's S.M.A.R.T wearout logic
+
+  * backup: add helpers for async last recently used (LRU) caches for chunk
+    and index reading of backup snapshot
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 16 Jun 2021 09:46:15 +0200
+
+rust-proxmox-backup (1.1.9-1) stable; urgency=medium
+
+  * lto/sg_tape/encryption: remove non lto-4 supported byte
+
+  * ui: improve tape restore
+
+  * ui: panel/UsageChart: change downloadServerUrl
+
+  * ui: css fixes and cleanups
+
+  * api2/tape: add api call to list media sets
+
+  * ui: tape/BackupOverview: expand pools by default
+
+  * api: node/journal: fix parameter extraction of /nodes/node/journal
+
+  * file-restore-daemon: limit concurrent download calls
+
+  * file-restore-daemon: watchdog: add inhibit for long downloads
+
+  * file-restore-daemon: work around tokio DuplexStream bug
+
+  * apt: fix removal of non-existant http-proxy config
+
+  * file-restore-daemon: disk: add RawFs bucket type
+
+  * file-restore-daemon: disk: ignore "invalid fs" error
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 01 Jun 2021 08:24:01 +0200
+
+rust-proxmox-backup (1.1.8-1) stable; urgency=medium
+
+  * api-proxy: implement 'reload-certificate' command and hot-reload proxy
+    certificate when updating via the API
+
+  * ui: add task descriptions for ACME/Let's Encrypt related tasks
+
+  * correctly set apt proxy configuration
+
+  * ui: configuration: support setting a HTTP proxy for APT and subscription
+    checks.
+
+  * ui: tape: add 'Force new Media-Set' checkbox to manual backup
+
+  * ui: datastore/Content: add forget (delete) button for whole backup groups
+
+  * ui: tape: backup overview: move restore buttons inline to action-buttons,
+    making the UX more similar to the datastore content tree-view
+
+  * ui: tape restore: enabling selecting multiple snapshots
+
+  * ui: dashboards statistics: visualize datastores where querying the usage
+    failed
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 21 May 2021 18:21:28 +0200
+
+rust-proxmox-backup (1.1.7-1) unstable; urgency=medium
+
+  * client: use stderr for all fingerprint confirm msgs
+
+  * fix #3391: improve mismatched fingerprint handling
+
+  * tape: add single snapshot restore
+
+  * docs/api-viewer: improve rendering of array format
+
+  * tape/pool_writer: do not unwrap on channel send
+
+  * ui: window/SyncJobEdit: disable autoSelect for remote datastore
+
+  * ui: tape: rename 'Datastore' to 'Target Datastore'
+
+  * manager: acme plugin: auto-complete available DNS challenge types
+
+  * manager: acme plugin: remove ID completion helper from add command
+
+  * completion: ACME plugin type: comment out http type for now, not useful
+
+  * acme: use proxmox-acme-plugins and load schema from there
+
+  * fix 3296: add http_proxy to node config, and provide a cli
+
+  * fix #3331: improve progress for last snapshot in group
+
+  * file-restore: add debug mode with serial access
+
+  * file-restore: support more drives
+
+  * file-restore: add more RAM for VMs with many drives or debug
+
+  * file-restore: try to kill VM when stale
+
+  * make sure URI paths start with a slash
+
+  * tape: use LOCATE(16) SCSI command
+
+  * call create_run_dir() at daemon startup
+
+  * tape/drive: add 'move_to_file' to TapeDriver trait
+
+  * proxmox_restore_daemon: mount ntfs with 'utf8' option
+
+  * client/http_client: add necessary brackets for ipv6
+
+  * docs: tape: clarify LTO-4/5 support
+
+  * tape/restore: optimize chunk restore behaviour
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 11 May 2021 13:22:49 +0200
+
+rust-proxmox-backup (1.1.6-2) unstable; urgency=medium
+
+  * fix permissions set in create_run_dir
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 04 May 2021 12:25:00 +0200
+
+rust-proxmox-backup (1.1.6-1) unstable; urgency=medium
+
+  * tape restore: do not verify restored files
+
+  * tape restore: add restore speed to logs
+
+  * tape restore: write datastore in separate thread
+
+  * add ACME support
+
+  * add node config
+
+  * docs: user-management: add note about untrusted certificates for
+    webauthn
+
+  * bin: use extract_output_format where necessary
+
+  * add ctime and size function to IndexFile trait
+
+  * ui: tape: handle tapes in changers without barcode
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 04 May 2021 12:09:25 +0200
+
+rust-proxmox-backup (1.1.5-3) stable; urgency=medium
+
+  * file-restore: use 'norecovery' for XFS filesystem to allow mounting
+    those which where not un-mounted during backup
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 29 Apr 2021 15:26:13 +0200
+
+rust-proxmox-backup (1.1.5-2) stable; urgency=medium
+
+  * file-restore: strip .img.fidx suffix from drive serials to avoid running
+    in the 20 character limit SCSI serial values have.
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 28 Apr 2021 11:15:08 +0200
+
+rust-proxmox-backup (1.1.5-1) unstable; urgency=medium
+
+  * tools/sgutils2: add size workaround for mode_sense
+
+  * tape: add read_medium_configuration_page() to detect WORM media
+
+  * file-restore: fix package name for kernel/initramfs image
+
+  * tape: remove MediumType struct, which is only valid on IBM drives
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 27 Apr 2021 12:20:04 +0200
+
+rust-proxmox-backup (1.1.4-1) unstable; urgency=medium
+
+  * file-restore: add size to image files and components
+
+  * file-restore: exit with code 1 in case streaming fails
+
+  * file-restore: use less memory for VM (now 128 MiB) and reboot on panic
+
+  * ui: tape: improve reload drive-status logic on user actions
+
+  * tape backup: list the snapshots we could back up on failed backup
+    notification
+
+  * Improve on a scheduling issue when updating the calendar event such, that
+    it would had triggered between the last-run and now. Use the next future
+    event as actual next trigger instead.
+
+  * SCSI mode sense: include the expected and unexpected sizes in the error
+    message, to allow easier debugging
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 27 Apr 2021 08:27:10 +0200
+
+rust-proxmox-backup (1.1.3-2) unstable; urgency=medium
+
+  * improve check for LTO4 tapes
+
+  * api: node status: return further information about SWAP, IO-wait, CPU info
+    and Kernel version
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 23 Apr 2021 10:52:08 +0200
+
+rust-proxmox-backup (1.1.3-1) unstable; urgency=medium
+
+  * tape restore: improve datastore locking when GC runs at the same time
+
+  * tape restore: always do quick chunk verification
+
+  * tape: improve compatibillity with some changers
+
+  * tape: work-around missing format command on LTO-4 drives, fall-back to
+    slower rewind erease
+
+  * fix #3393: pxar: allow and safe the 'security.NTACL' extended attribute
+
+  * file-restore: support encrypted VM backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 22 Apr 2021 20:14:58 +0200
+
+rust-proxmox-backup (1.1.2-1) unstable; urgency=medium
+
+  * backup verify: always re-check if we can skip a chunk in the actual verify
+    loop.
+
+  * tape: do not try to backup unfinished backups
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 15 Apr 2021 13:26:52 +0200
+
+rust-proxmox-backup (1.1.1-1) unstable; urgency=medium
+
+  * docs: include tape in table of contents
+
+  * docs: tape: improve definition-list format and add screenshots
+
+  * docs: reorder maintenance and network chapters after client-usage/tools
+    chapters
+
+  * ui: tape changer status: add Format button to drive grid
+
+  * backup/verify: improve speed on disks with slow random-IO (spinners) by
+    iterating over chunks sorted by inode
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 14 Apr 2021 14:50:29 +0200
+
+rust-proxmox-backup (1.1.0-1) unstable; urgency=medium
+
+  * enable tape backup as technology preview by default
+
+  * tape: read drive status: clear deferred error or media changed events.
+
+  * tape: improve end-of-tape (EOT) error handling
+
+  * tape: cleanup media catalog on tape reuse
+
+  * zfs: re-use underlying pool wide IO stats for datasets
+
+  * api daemon: only log error from accepting new connections to avoid opening
+    to many file descriptors
+
+  * api/datastore: allow downloading the entire archive as ZIP archive, not
+    only sub-paths
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 13 Apr 2021 14:42:18 +0200
+
+rust-proxmox-backup (1.0.14-1) unstable; urgency=medium
+
+  * server: compress API call response and static files if client accepts that
+
+  * compress generated ZIP archives with deflate
+
+  * tape: implement LTO userspace driver
+
+  * docs: mention new user space tape driver, adopt device path names
+
+  * tape: always clear encryption key after backup (for security reasons)
+
+  * ui: improve changer status view
+
+  * add proxmox-file-restore package, providing a central file-restore binary
+    with preparations for restoring files also from block level backups using
+    QEMU for a safe encapsulation.
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 08 Apr 2021 16:35:11 +0200
+
+rust-proxmox-backup (1.0.13-1) unstable; urgency=medium
+
+  * pxar: improve handling ACL entries on create and restore
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 02 Apr 2021 15:32:01 +0200
+
+rust-proxmox-backup (1.0.12-1) unstable; urgency=medium
+
+  * tape: write catalogs to tape (speedup catalog restore)
+
+  * tape: add --scan option for catalog restore
+
+  * tape: improve locking (lock media-sets)
+
+  * tape: ui: enable datastore mappings
+
+  * fix #3359: fix blocking writes in async code during pxar create
+
+  * api2/tape/backup: wait indefinitely for lock in scheduled backup jobs
+
+  * docu improvements
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 26 Mar 2021 14:08:47 +0100
+
+rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
+
+  * fix feature flag logic in pxar create
+
+  * tools/zip: add missing start_disk field for zip64 extension to improve
+    compatibility with some strict archive tools
+
+  * tape: speedup backup by doing read/write in parallel
+
+  * tape: store datastore name in tape archives and media catalog
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 18 Mar 2021 12:36:01 +0100
+
 rust-proxmox-backup (1.0.10-1) unstable; urgency=medium
 
   * tape: improve MediaPool allocation by sorting tapes by creation time and
debian/control (60 lines changed)

@@ -15,6 +15,9 @@ Build-Depends: debhelper (>= 11),
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
+librust-env-logger-0.7+default-dev,
+librust-flate2-1+default-dev,
+librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.3+default-dev,
 librust-h2-0.3+stream-dev,
@@ -36,13 +39,17 @@ Build-Depends: debhelper (>= 11),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-1+default-dev,
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.11+api-macro-dev,
-librust-proxmox-0.11+default-dev,
-librust-proxmox-0.11+sortable-macro-dev,
-librust-proxmox-0.11+websocket-dev,
+librust-proxmox-0.11+api-macro-dev (>= 0.11.5-~~),
+librust-proxmox-0.11+default-dev (>= 0.11.5-~~),
+librust-proxmox-0.11+sortable-macro-dev (>= 0.11.5-~~),
+librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
-librust-pxar-0.9+default-dev,
-librust-pxar-0.9+tokio-io-dev,
+librust-proxmox-http-0.2+client-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+default-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+http-helpers-dev (>= 0.2.1-~~),
+librust-proxmox-http-0.2+websocket-dev (>= 0.2.1-~~),
+librust-pxar-0.10+default-dev (>= 0.10.1-~~),
+librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
 librust-regex-1+default-dev (>= 1.2-~~),
 librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,
@@ -50,21 +57,24 @@ Build-Depends: debhelper (>= 11),
 librust-serde-json-1+default-dev,
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
-librust-tokio-1+default-dev,
-librust-tokio-1+fs-dev,
-librust-tokio-1+io-util-dev,
-librust-tokio-1+macros-dev,
-librust-tokio-1+net-dev,
-librust-tokio-1+parking-lot-dev,
-librust-tokio-1+process-dev,
-librust-tokio-1+rt-dev,
-librust-tokio-1+rt-multi-thread-dev,
-librust-tokio-1+signal-dev,
-librust-tokio-1+time-dev,
+librust-thiserror-1+default-dev,
+librust-tokio-1+default-dev (>= 1.6-~~),
+librust-tokio-1+fs-dev (>= 1.6-~~),
+librust-tokio-1+io-std-dev (>= 1.6-~~),
+librust-tokio-1+io-util-dev (>= 1.6-~~),
+librust-tokio-1+macros-dev (>= 1.6-~~),
+librust-tokio-1+net-dev (>= 1.6-~~),
+librust-tokio-1+parking-lot-dev (>= 1.6-~~),
+librust-tokio-1+process-dev (>= 1.6-~~),
+librust-tokio-1+rt-dev (>= 1.6-~~),
+librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
+librust-tokio-1+signal-dev (>= 1.6-~~),
+librust-tokio-1+time-dev (>= 1.6-~~),
 librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
 librust-tokio-stream-0.1+default-dev,
 librust-tokio-util-0.6+codec-dev,
 librust-tokio-util-0.6+default-dev,
+librust-tokio-util-0.6+io-dev,
 librust-tower-service-0.3+default-dev,
 librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
@@ -106,17 +116,16 @@ Architecture: any
 Depends: fonts-font-awesome,
 libjs-extjs (>= 6.0.1),
 libjs-qrcodejs (>= 1.20201119),
+libproxmox-acme-plugins,
 libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
-mt-st,
-mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-6),
+proxmox-widget-toolkit (>= 2.5-6),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
@@ -146,3 +155,14 @@ Depends: libjs-extjs,
 Architecture: all
 Description: Proxmox Backup Documentation
 This package contains the Proxmox Backup Documentation files.
+
+Package: proxmox-backup-file-restore
+Architecture: any
+Depends: ${misc:Depends},
+${shlibs:Depends},
+Recommends: pve-qemu-kvm (>= 5.0.0-9),
+proxmox-backup-restore-image,
+Description: Proxmox Backup single file restore tools for pxar and block device backups
+This package contains the Proxmox Backup single file restore client for
+restoring individual files and folders from both host/container and VM/block
+device backups. It includes a block device restore driver using QEMU.
debian/control.in (16 lines changed)

@@ -3,17 +3,16 @@ Architecture: any
 Depends: fonts-font-awesome,
 libjs-extjs (>= 6.0.1),
 libjs-qrcodejs (>= 1.20201119),
+libproxmox-acme-plugins,
 libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
-mt-st,
-mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 2.3-6),
+proxmox-widget-toolkit (>= 2.5-6),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
@@ -43,3 +42,14 @@ Depends: libjs-extjs,
 Architecture: all
 Description: Proxmox Backup Documentation
 This package contains the Proxmox Backup Documentation files.
+
+Package: proxmox-backup-file-restore
+Architecture: any
+Depends: ${misc:Depends},
+${shlibs:Depends},
+Recommends: pve-qemu-kvm (>= 5.0.0-9),
+proxmox-backup-restore-image,
+Description: Proxmox Backup single file restore tools for pxar and block device backups
+This package contains the Proxmox Backup single file restore client for
+restoring individual files and folders from both host/container and VM/block
+device backups. It includes a block device restore driver using QEMU.
debian/postinst (10 lines changed)

@@ -48,6 +48,16 @@ case "$1" in
 /etc/proxmox-backup/remote.cfg || true
 fi
 fi
+if dpkg --compare-versions "$2" 'le' '1.0.14-1'; then
+    # FIXME: Remove with 2.0
+    if grep -s -q -P -e '^linux:' /etc/proxmox-backup/tape.cfg; then
+        echo "========="
+        echo "= NOTE: You have now unsupported 'linux' tape drives configured."
+        echo "= * Execute 'udevadm control --reload-rules && udevadm trigger' to update /dev"
+        echo "= * Edit '/etc/proxmox-backup/tape.cfg', remove 'linux' entries and re-add over CLI/GUI"
+        echo "========="
+    fi
+fi
 # FIXME: remove with 2.0
 if [ -d "/var/lib/proxmox-backup/tape" ] &&
 [ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
debian/proxmox-backup-file-restore.bash-completion (new file, 1 line)

@@ -0,0 +1 @@
+debian/proxmox-file-restore.bc proxmox-file-restore
debian/proxmox-backup-file-restore.bc (new file, 8 lines)

@@ -0,0 +1,8 @@
+# proxmox-file-restore bash completion
+
+# see http://tiswww.case.edu/php/chet/bash/FAQ
+# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
+# this modifies global var, but I found no better way
+COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
+
+complete -C 'proxmox-file-restore bashcomplete' proxmox-file-restore
debian/proxmox-backup-file-restore.install (new file, 4 lines)

@@ -0,0 +1,4 @@
+usr/bin/proxmox-file-restore
+usr/share/man/man1/proxmox-file-restore.1
+usr/share/zsh/vendor-completions/_proxmox-file-restore
+usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
debian/proxmox-backup-file-restore.postinst (new executable file, 74 lines)

@@ -0,0 +1,74 @@
+#!/bin/sh
+
+set -e
+
+update_initramfs() {
+    # regenerate initramfs for single file restore VM
+    INST_PATH="/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
+    CACHE_PATH="/var/cache/proxmox-backup/file-restore-initramfs.img"
+    CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
+
+    # cleanup first, in case proxmox-file-restore was uninstalled since we do
+    # not want an unuseable image lying around
+    rm -f "$CACHE_PATH"
+
+    if [ ! -f "$INST_PATH/initramfs.img" ]; then
+        echo "proxmox-backup-restore-image is not installed correctly, skipping update" >&2
+        exit 0
+    fi
+
+    echo "Updating file-restore initramfs..."
+
+    # avoid leftover temp file
+    cleanup() {
+        rm -f "$CACHE_PATH.tmp" "$CACHE_PATH_DBG.tmp"
+    }
+    trap cleanup EXIT
+
+    mkdir -p "/var/cache/proxmox-backup"
+    cp "$INST_PATH/initramfs.img" "$CACHE_PATH.tmp"
+
+    # cpio uses passed in path as offset inside the archive as well, so we need
+    # to be in the same dir as the daemon binary to ensure it's placed in /
+    ( cd "$INST_PATH"; \
+        printf "./proxmox-restore-daemon" \
+        | cpio -o --format=newc -A -F "$CACHE_PATH.tmp" )
+    mv -f "$CACHE_PATH.tmp" "$CACHE_PATH"
+
+    if [ -f "$INST_PATH/initramfs-debug.img" ]; then
+        echo "Updating file-restore debug initramfs..."
+        cp "$INST_PATH/initramfs-debug.img" "$CACHE_PATH_DBG.tmp"
+        ( cd "$INST_PATH"; \
+            printf "./proxmox-restore-daemon" \
+            | cpio -o --format=newc -A -F "$CACHE_PATH_DBG.tmp" )
+        mv -f "$CACHE_PATH_DBG.tmp" "$CACHE_PATH_DBG"
+    fi
+
+    trap - EXIT
+}
+
+case "$1" in
+    configure)
+        # in case restore daemon was updated
+        update_initramfs
+    ;;
+
+    triggered)
+        if [ "$2" = "proxmox-backup-restore-image-update" ]; then
+            # in case base-image was updated
+            update_initramfs
+        else
+            echo "postinst called with unknown trigger name: \`$2'" >&2
+        fi
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+exit 0
debian/proxmox-backup-file-restore.triggers (new file, 1 line)

@@ -0,0 +1 @@
+interest-noawait proxmox-backup-restore-image-update
debian/proxmox-backup-server.udev (new file, 18 lines)

@@ -0,0 +1,18 @@
+# do not edit this file, it will be overwritten on update
+
+# persistent storage links: /dev/tape/{by-id,by-path}
+
+ACTION=="remove", GOTO="persistent_storage_tape_end"
+ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_tape_end"
+
+# also see: /lib/udev/rules.d/60-persistent-storage-tape.rules
+
+SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", IMPORT{program}="scsi_id --sg-version=3 --export --whitelisted -d $devnode", \
+    SYMLINK+="tape/by-id/scsi-$env{ID_SERIAL}-sg"
+
+# iSCSI devices from the same host have all the same ID_SERIAL,
+# but additionally a property named ID_SCSI_SERIAL.
+SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SERIAL}=="?*", \
+    SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
+
+LABEL="persistent_storage_tape_end"
debian/rules (7 lines changed)

@@ -52,8 +52,11 @@ override_dh_dwz:
 
 override_dh_strip:
     dh_strip
-    for exe in $$(find debian/proxmox-backup-client/usr \
-        debian/proxmox-backup-server/usr -executable -type f); do \
+    for exe in $$(find \
+        debian/proxmox-backup-client/usr \
+        debian/proxmox-backup-server/usr \
+        debian/proxmox-backup-file-restore \
+        -executable -type f); do \
         debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
     done
 
@@ -5,6 +5,7 @@ GENERATED_SYNOPSIS := \
     proxmox-backup-client/synopsis.rst \
     proxmox-backup-client/catalog-shell-synopsis.rst \
     proxmox-backup-manager/synopsis.rst \
+    proxmox-file-restore/synopsis.rst \
     pxar/synopsis.rst \
     pmtx/synopsis.rst \
     pmt/synopsis.rst \
@@ -25,7 +26,8 @@ MAN1_PAGES := \
     proxmox-tape.1 \
     proxmox-backup-proxy.1 \
     proxmox-backup-client.1 \
-    proxmox-backup-manager.1
+    proxmox-backup-manager.1 \
+    proxmox-file-restore.1
 
 MAN5_PAGES := \
     media-pool.cfg.5 \
@@ -179,10 +181,16 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
 proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
     rst2man $< >$@
 
+proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
+    ${COMPILEDIR}/proxmox-file-restore printdoc > proxmox-file-restore/synopsis.rst
+
+proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
+    rst2man $< >$@
+
 .PHONY: onlinehelpinfo
 onlinehelpinfo:
     @echo "Generating OnlineHelpInfo.js..."
-    $(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
+    $(SPHINXBUILD) -b proxmox-scanrefs -Q $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
    @echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
 
 api-viewer/apidata.js: ${COMPILEDIR}/docgen
@@ -86,13 +86,9 @@ Ext.onReady(function() {
     return pdef['enum'] ? 'enum' : (pdef.type || 'string');
 };
 
-var render_format = function(value, metaData, record) {
-    var pdef = record.data;
-
-    metaData.style = 'white-space:normal;'
-
+let render_simple_format = function(pdef, type_fallback) {
     if (pdef.typetext)
-        return Ext.htmlEncode(pdef.typetext);
+        return pdef.typetext;
 
     if (pdef['enum'])
         return pdef['enum'].join(' | ');
@@ -101,9 +97,28 @@ Ext.onReady(function() {
     return pdef.format;
 
     if (pdef.pattern)
-        return Ext.htmlEncode(pdef.pattern);
+        return pdef.pattern;
 
-    return '';
+    if (pdef.type === 'boolean')
+        return `<true|false>`;
+
+    if (type_fallback && pdef.type)
+        return `<${pdef.type}>`;
+
+    return;
+};
+
+let render_format = function(value, metaData, record) {
+    let pdef = record.data;
+
+    metaData.style = 'white-space:normal;'
+
+    if (pdef.type === 'array' && pdef.items) {
+        let format = render_simple_format(pdef.items, true);
+        return `[${Ext.htmlEncode(format)}, ...]`;
+    }
+
+    return Ext.htmlEncode(render_simple_format(pdef) || '');
 };
 
 var real_path = function(path) {
@@ -143,7 +158,7 @@ Ext.onReady(function() {
     permhtml += "</div></div>";
 } else {
     //console.log(permission);
-    permhtml += "Unknown systax!";
+    permhtml += "Unknown syntax!";
 }
 
 return permhtml;
@@ -3,9 +3,10 @@ Backup Client Usage
 
 The command line client is called :command:`proxmox-backup-client`.
 
-Repository Locations
---------------------
+.. _client_repository:
+
+Backup Repository Locations
+---------------------------
 
 The client uses the following notation to specify a datastore repository
 on the backup server.
@@ -471,7 +472,7 @@ located in ``/etc``, you could do the following:
 pxar:/ > restore target/ --pattern etc/**/*.conf
 ...
 
-The above will scan trough all the directories below ``/etc`` and restore all
+The above will scan through all the directories below ``/etc`` and restore all
 files ending in ``.conf``.
 
 .. todo:: Explain interactive restore in more detail
@@ -691,8 +692,15 @@ Benchmarking
 ------------
 
 The backup client also comes with a benchmarking tool. This tool measures
-various metrics relating to compression and encryption speeds. You can run a
-benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
+various metrics relating to compression and encryption speeds. If a Proxmox
+Backup repository (remote or local) is specified, the TLS upload speed will get
+measured too.
+
+You can run a benchmark using the ``benchmark`` subcommand of
+``proxmox-backup-client``:
+
+.. note:: The TLS speed test is only included if a :ref:`backup server
+   repository is specified <client_repository>`.
 
 .. code-block:: console
 
@@ -723,8 +731,7 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
 
 
 .. note:: The percentages given in the output table correspond to a
-   comparison against a Ryzen 7 2700X. The TLS test connects to the
-   local host, so there is no network involved.
+   comparison against a Ryzen 7 2700X.
 
 You can also pass the ``--output-format`` parameter to output stats in ``json``,
 rather than the default table format.
@@ -6,6 +6,11 @@ Command Line Tools
 
 .. include:: proxmox-backup-client/description.rst
 
+``proxmox-file-restore``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: proxmox-file-restore/description.rst
+
 ``proxmox-backup-manager``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -26,6 +26,27 @@ Those command are available when you start an interactive restore shell:
 .. include:: proxmox-backup-manager/synopsis.rst
 
 
+``proxmox-tape``
+----------------
+
+.. include:: proxmox-tape/synopsis.rst
+
+``pmt``
+-------
+
+.. include:: pmt/options.rst
+
+....
+
+.. include:: pmt/synopsis.rst
+
+
+``pmtx``
+--------
+
+.. include:: pmtx/synopsis.rst
+
+
 ``pxar``
 --------
 
@@ -49,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.

-extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
+extensions = ["sphinx.ext.graphviz", 'sphinx.ext.mathjax', "sphinx.ext.todo", "proxmox-scanrefs"]

 todo_link_only = True

@@ -307,6 +307,9 @@ html_show_sourcelink = False
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'ProxmoxBackupdoc'

+# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
+mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
+
 # -- Options for LaTeX output ---------------------------------------------

 latex_engine = 'xelatex'

@@ -464,6 +467,3 @@ epub_exclude_files = ['search.html']
 # If false, no index is generated.
 #
 # epub_use_index = True

-# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
-mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
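With ``sphinx.ext.mathjax`` enabled, formulas written with the standard Sphinx
math roles render in the HTML output; for example (an illustrative RST snippet,
not taken from this change set):

.. code-block:: rst

   ... the chance to correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}` ...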
@@ -1,4 +1,4 @@
-Each drive configuration section starts with a header ``linux: <name>``,
+Each LTO drive configuration section starts with a header ``lto: <name>``,
 followed by the drive configuration options.

 Tape changer configurations start with ``changer: <name>``,
@@ -6,7 +6,7 @@ followed by the changer configuration options.

 ::

-  linux: hh8
+  lto: hh8
         changer sl3
         path /dev/tape/by-id/scsi-10WT065325-nst
@@ -37,8 +37,53 @@ Options
 .. include:: config/datastore/config.rst


+``media-pool.cfg``
+~~~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/media-pool/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/media-pool/config.rst
+
+
+``tape.cfg``
+~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/tape/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/tape/config.rst
+
+
+``tape-job.cfg``
+~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/tape-job/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/tape-job/config.rst
+
+
 ``user.cfg``
-~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~

 File Format
 ^^^^^^^^^^^
@@ -57,6 +57,11 @@ div.sphinxsidebar h3 {
 div.sphinxsidebar h1.logo-name {
     display: none;
 }

+div.document, div.footer {
+    width: min(100%, 1320px);
+}
+
 @media screen and (max-width: 875px) {
     div.sphinxsidebar p.logo {
         display: initial;
@@ -65,9 +70,19 @@ div.sphinxsidebar h1.logo-name {
         display: block;
     }
     div.sphinxsidebar span {
-        color: #AAA;
+        color: #EEE;
     }
-    ul li.toctree-l1 > a {
+    .sphinxsidebar ul li.toctree-l1 > a, div.sphinxsidebar a {
         color: #FFF;
     }
+    div.sphinxsidebar {
+        background-color: #555;
+    }
+    div.body {
+        min-width: 300px;
+    }
+    div.footer {
+        display: block;
+        margin: 15px auto 0px auto;
+    }
 }
@@ -61,9 +61,7 @@ attacker gains access to the server or any point of the network, they will not
 be able to read the data.

 .. note:: Encryption is not enabled by default. To set up encryption, see the
-   `Encryption
-   <https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
-   of the Proxmox Backup Server Administration Guide.
+   :ref:`backup client encryption section <client_encryption>`.


 Is the backup incremental/deduplicated?
docs/gui.rst (12 changed lines)
@@ -112,6 +112,18 @@ The administration menu item also contains a disk management subsection:
 * **Directory**: Create and view information on *ext4* and *xfs* disks
 * **ZFS**: Create and view information on *ZFS* disks

+Tape Backup
+^^^^^^^^^^^
+
+.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
+   :align: right
+   :alt: Tape Backup: Tape changer overview
+
+The `Tape Backup`_ section contains a top panel, managing tape media sets,
+inventories, drives, changers and the tape backup jobs themselves.
+
+It also contains a subsection per standalone drive and per changer, with a
+status and management view for those devices.
+
 Datastore
 ^^^^^^^^^
New screenshot files (binary):

* docs/images/screenshots/pbs-gui-tape-backup-jobs-add.png (28 KiB)
* docs/images/screenshots/pbs-gui-tape-backup-jobs.png (75 KiB)
* docs/images/screenshots/pbs-gui-tape-changer-overview.png (117 KiB)
* docs/images/screenshots/pbs-gui-tape-changers-add.png (12 KiB)
* docs/images/screenshots/pbs-gui-tape-changers.png (79 KiB)
* docs/images/screenshots/pbs-gui-tape-crypt-keys.png (72 KiB)
* docs/images/screenshots/pbs-gui-tape-drives-add.png (13 KiB)
* docs/images/screenshots/pbs-gui-tape-drives.png (112 KiB)
* docs/images/screenshots/pbs-gui-tape-pools-add.png (18 KiB)
* docs/images/screenshots/pbs-gui-tape-pools.png (70 KiB)
@@ -25,14 +25,15 @@ in the section entitled "GNU Free Documentation License".
    terminology.rst
    gui.rst
    storage.rst
-   network-management.rst
    user-management.rst
-   managing-remotes.rst
-   maintenance.rst
    backup-client.rst
    pve-integration.rst
    pxar-tool.rst
+   tape-backup.rst
+   managing-remotes.rst
+   maintenance.rst
    sysadmin.rst
+   network-management.rst
    technical-overview.rst
    faq.rst
@@ -113,9 +113,9 @@ Client Installation
 Install `Proxmox Backup`_ Client on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Proxmox ships as a set of Debian packages to be installed on
-top of a standard Debian installation. After configuring the
-:ref:`sysadmin_package_repositories`, you need to run:
+Proxmox ships as a set of Debian packages to be installed on top of a standard
+Debian installation. After configuring the :ref:`package_repositories_client_only_apt`,
+you need to run:

 .. code-block:: console

@@ -123,12 +123,6 @@ top of a standard Debian installation. After configuring the
   # apt-get install proxmox-backup-client

-Installing from source
-~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: Add section "Installing from source"
-
-Installing statically linked binary
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: Add section "Installing statically linked binary"
+.. note:: The client-only repository should be usable by most recent Debian and
+   Ubuntu derivatives.
@@ -65,10 +65,10 @@ Main Features
 :Compression: The ultra-fast Zstandard_ compression is able to compress
    several gigabytes of data per second.

-:Encryption: Backups can be encrypted on the client-side, using AES-256 in
-   Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
-   provides very high performance on modern hardware. In addition to client-side
-   encryption, all data is transferred via a secure TLS connection.
+:Encryption: Backups can be encrypted on the client-side, using AES-256 GCM_.
+   This authenticated encryption (AE_) mode provides very high performance on
+   modern hardware. In addition to client-side encryption, all data is
+   transferred via a secure TLS connection.

 :Web interface: Manage the Proxmox Backup Server with the integrated, web-based
    user interface.
@@ -76,8 +76,16 @@ Main Features
 :Open Source: No secrets. Proxmox Backup Server is free and open-source
    software. The source code is licensed under AGPL, v3.

-:Support: Enterprise support will be available from `Proxmox`_ once the beta
-   phase is over.
+:No Limits: Proxmox Backup Server has no artificial limits for backup storage or
+   backup clients.
+
+:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support
+   in the form of `Proxmox Backup Server Subscription Plans
+   <https://www.proxmox.com/en/proxmox-backup-server/pricing>`_. Users at every
+   subscription level get access to the Proxmox Backup :ref:`Enterprise
+   Repository <sysadmin_package_repos_enterprise>`. In addition, with a Basic,
+   Standard or Premium subscription, users have access to the :ref:`Proxmox
+   Customer Portal <get_help_enterprise_support>`.


 Reasons for Data Backup?
@@ -117,8 +125,8 @@ Proxmox Backup Server consists of multiple components:
 * A client CLI tool (`proxmox-backup-client`) to access the server easily from
   any `Linux amd64` environment

-Aside from the web interface, everything is written in the Rust programming
-language.
+Aside from the web interface, most parts of Proxmox Backup Server are written in
+the Rust programming language.

 "The Rust programming language helps you write faster, more reliable software.
 High-level ergonomics and low-level control are often at odds in programming
|
|||||||
Getting Help
|
Getting Help
|
||||||
------------
|
------------
|
||||||
|
|
||||||
|
.. _get_help_enterprise_support:
|
||||||
|
|
||||||
|
Enterprise Support
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
|
||||||
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
|
||||||
|
`Proxmox Customer Portal <https://my.proxmox.com>`_. The customer portal
|
||||||
|
provides support with guaranteed response times from the Proxmox developers.
|
||||||
|
For more information or for volume discounts, please contact office@proxmox.com.
|
||||||
|
|
||||||
Community Support Forum
|
Community Support Forum
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
@@ -148,7 +148,7 @@ are checked again. The interface for creating verify jobs can be found under the
 **Verify Jobs** tab of the datastore.

 .. Note:: It is recommended that you reverify all backups at least monthly, even
-   if a previous verification was successful. This is becuase physical drives
+   if a previous verification was successful. This is because physical drives
    are susceptible to damage over time, which can cause an old, working backup
    to become corrupted in a process known as `bit rot/data degradation
    <https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
@@ -29,6 +29,8 @@ update``.
 In addition, you need a package repository from Proxmox to get Proxmox Backup
 updates.

+.. _package_repos_secure_apt:
+
 SecureApt
 ~~~~~~~~~
@@ -69,10 +71,12 @@ Here, the output should be:

   f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg

+.. _sysadmin_package_repos_enterprise:
+
 `Proxmox Backup`_ Enterprise Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This will be the default, stable, and recommended repository. It is available for
+This is the stable, recommended repository. It is available for
 all `Proxmox Backup`_ subscription users. It contains the most stable packages,
 and is suitable for production use. The ``pbs-enterprise`` repository is
 enabled by default:
@@ -137,3 +141,56 @@ You can access this repository by adding the following line to
    :caption: sources.list entry for ``pbstest``

    deb http://download.proxmox.com/debian/pbs buster pbstest
+
+.. _package_repositories_client_only:
+
+Proxmox Backup Client-only Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you want to :ref:`use the Proxmox Backup Client <client_creating_backups>`
+on systems using a Linux distribution not based on Proxmox projects, you can
+use the client-only repository.
+
+Currently there's only a client repository for APT-based systems.
+
+.. _package_repositories_client_only_apt:
+
+APT-based Proxmox Backup Client Repository
+++++++++++++++++++++++++++++++++++++++++++
+
+For modern Linux distributions using `apt` as package manager, as all Debian
+and Ubuntu derivatives do, you may be able to use the APT-based repository.
+
+This repository is tested with:
+
+- Debian Buster
+- Ubuntu 20.04 LTS
+
+It may work with older versions, and should work with more recently released
+ones.
+
+In order to configure this repository, you need to first :ref:`set up the
+Proxmox release key <package_repos_secure_apt>`. After that, add the repository
+URL to the APT sources lists.
+Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
+snippet:
+
+.. code-block:: sources.list
+   :caption: File: ``/etc/apt/sources.list.d/pbs-client.list``
+
+   deb http://download.proxmox.com/debian/pbs-client buster main
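With that snippet in place, installing the client follows the same steps as on
hosts with the full repositories (mirroring the client installation section
earlier in this compare):

.. code-block:: console

   # apt-get update
   # apt-get install proxmox-backup-client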
+
+.. _node_options_http_proxy:
+
+Repository Access Behind HTTP Proxy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some setups have restricted access to the internet, sometimes only through a
+central proxy. You can set up an HTTP proxy through the Proxmox Backup
+Server's web-interface in the `Configuration -> Authentication` tab.
+
+Once configured, this proxy will be used for apt network requests and for
+checking a Proxmox Backup Server support subscription.
+
+Standard HTTP proxy configurations are accepted, `[http://]<host>[:port]`, where
+the `<host>` part may include an authorization, for example:
+`http://user:pass@proxy.example.org:12345`
@@ -13,39 +13,3 @@ parameter. It accepts the following values:
 :``json``: JSON (single line).

 :``json-pretty``: JSON (multiple lines, nicely formatted).
-
-Device driver options can be specified as integer numbers (see
-``/usr/include/linux/mtio.h``), or using symbolic names:
-
-:``buffer-writes``: Enable buffered writes
-
-:``async-writes``: Enable async writes
-
-:``read-ahead``: Use read-ahead for fixed block size
-
-:``debugging``: Enable debugging if compiled into the driver
-
-:``two-fm``: Write two file marks when closing the file
-
-:``fast-mteom``: Space directly to eod (and lose file number)
-
-:``auto-lock``: Automatically lock/unlock drive door
-
-:``def-writes``: Defaults are meant only for writes
-
-:``can-bsr``: Indicates that the drive can space backwards
-
-:``no-blklims``: Drive does not support read block limits
-
-:``can-partitions``: Drive can handle partitioned tapes
-
-:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
-
-:``sysv``: Enable the System V semantics
-
-:``nowait``: Do not wait for rewind, etc. to complete
-
-:``sili``: Enables setting the SILI bit in SCSI commands when reading
-   in variable block mode to enhance performance when reading blocks
-   shorter than the byte count
docs/proxmox-file-restore/description.rst (new file)
@@ -0,0 +1,3 @@
+Command line tool for restoring files and directories from PBS archives. In contrast to
+proxmox-backup-client, this supports both container/host and VM backups.
+
docs/proxmox-file-restore/man1.rst (new file)
@@ -0,0 +1,28 @@
+==========================
+proxmox-file-restore
+==========================
+
+.. include:: ../epilog.rst
+
+-----------------------------------------------------------------------
+Command line tool for restoring files and directories from PBS archives
+-----------------------------------------------------------------------
+
+:Author: |AUTHOR|
+:Version: Version |VERSION|
+:Manual section: 1
+
+
+Synopsis
+==========
+
+.. include:: synopsis.rst
+
+
+Description
+============
+
+.. include:: description.rst
+
+
+.. include:: ../pbs-copyright.rst
@@ -3,6 +3,26 @@
 `Proxmox VE`_ Integration
 -------------------------

+A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
+former as a storage in a Proxmox VE standalone or cluster setup.
+
+See also the `Proxmox VE Storage - Proxmox Backup Server
+<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
+of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
+
+
+Using the Proxmox VE Web-Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxmox VE has offered native API and web-interface integration with Proxmox
+Backup Server since the `Proxmox VE 6.3 release
+<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
+
+A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
+
+Using the Proxmox VE Command-Line
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 You need to define a new storage with type 'pbs' on your `Proxmox VE`_
 node. The following example uses ``store2`` as storage name, and
 assumes the server address is ``localhost``, and you want to connect
@@ -41,9 +61,9 @@ After that you should be able to see storage status with:
    Name    Type    Status        Total          Used     Available       %
    store2  pbs     active   3905109820    1336687816    2568422004  34.23%

-Having added the PBS datastore to `Proxmox VE`_, you can backup VMs and
-containers in the same way you would for any other storage device within the
-environment (see `PVE Admin Guide: Backup and Restore
+Having added the Proxmox Backup Server datastore to `Proxmox VE`_, you can
+back up VMs and containers in the same way you would for any other storage
+device within the environment (see `Proxmox VE Admin Guide: Backup and Restore
 <https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_).
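On the Proxmox VE side, the storage definition sketched above can also be
created from the shell. A hedged sketch with ``pvesm`` (the exact option names
and the ``user1@pbs`` account are assumptions, reusing the ``store2`` and
``localhost`` example values from the surrounding text):

.. code-block:: console

   # pvesm add pbs store2 --server localhost --datastore store2
   # pvesm set store2 --username user1@pbs --password <secret>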
@@ -1,5 +1,5 @@
-Storage
-=======
+Backup Storage
+==============

 .. _storage_disk_management:
@@ -4,12 +4,11 @@ Tape Backup
 ===========

 .. CAUTION:: Tape Backup is a technical preview feature, not meant for
-   production use. To enable it in the GUI, you need to issue the
-   following command (as root user on the console):
+   production use.

-.. code-block:: console
-
-   # touch /etc/proxmox-backup/tape.cfg
+.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
+   :align: right
+   :alt: Tape Backup: Tape changer overview

 Proxmox tape backup provides an easy way to store datastore content
 onto magnetic tapes. This increases data safety because you get:
@@ -59,7 +58,7 @@ In general, LTO tapes offer the following advantages:
 - Cold Media
 - Movable (storable inside vault)
 - Multiple vendors (for both media and drives)
-- Build in AES-GCM Encryption engine
+- Built-in AES-GCM encryption engine

 Note that `Proxmox Backup Server` already stores compressed data, so using the
 tape compression feature has no advantage.
@@ -68,13 +67,19 @@ tape compression feature has no advantage.
 Supported Hardware
 ------------------

-Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
-or later. In general, all SCSI-2 tape drives supported by the Linux
-kernel should work, but features like hardware encryption need LTO-4
-or later.
+Proxmox Backup Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
+or later and has best-effort support for generation 4 (LTO-4). While
+many LTO-4 systems are known to work, some might need firmware updates or
+do not implement necessary features to work with Proxmox Backup Server.

-Tape changing is carried out using the Linux 'mtx' command line
-tool, so any changer device supported by this tool should work.
+Tape changing is carried out using the SCSI Medium Changer protocol,
+so all modern tape libraries should work.
+
+.. Note:: We use a custom user space tape driver written in Rust_. This
+   driver directly communicates with the tape drive using the SCSI
+   generic interface. This may have negative side effects when used with the old
+   Linux kernel tape driver, so you should not use that driver with
+   Proxmox tape backup.

 Drive Performance
@@ -84,7 +89,7 @@ Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
 that it still takes a minimum of 9 hours to completely write or
 read a single tape (even at maximum speed).

-The only way to speed up that data rate is to use more than one
+The only way to speed that data rate up is to use more than one
 drive. That way, you can run several backup jobs in parallel, or run
 restore jobs while the other drives are used for backups.
@@ -93,15 +98,16 @@ Also consider that you first need to read data from your datastore
 rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
 so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
 to write to your tape at full speed, please make sure that the source
-datastore is able to deliver that performance (e.g, by using SSDs).
+datastore is able to deliver that performance (for example, by using SSDs).


 Terminology
 -----------

-:Tape Labels: are used to uniquely identify a tape. You would normally apply a
-   sticky paper label to the front of the cartridge. We additionally store the
-   label text magnetically on the tape (first file on tape).
+**Tape Labels:**
+   are used to uniquely identify a tape. You would normally apply a
+   sticky paper label to the front of the cartridge. We additionally
+   store the label text magnetically on the tape (first file on tape).

 .. _Code 39: https://en.wikipedia.org/wiki/Code_39
@@ -109,7 +115,8 @@ Terminology

 .. _LTO Barcode Generator: lto-barcode/index.html

-:Barcodes: are a special form of tape labels, which are electronically
+**Barcodes:**
+   are a special form of tape labels, which are electronically
    readable. Most LTO tape robots use an 8 character string encoded as
    `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
    Specification`_.
@@ -118,42 +125,49 @@ Terminology
    or print them yourself. You can use our `LTO Barcode Generator`_
    app, if you would like to print them yourself.

    .. Note:: Physical labels and the associated adhesive should have an
       environmental performance to match or exceed the environmental
       specifications of the cartridge to which it is applied.

-:Media Pools: A media pool is a logical container for tapes. A backup
-   job targets one media pool, so a job only uses tapes from that
-   pool. The pool additionally defines how long a backup job can
-   append data to tapes (allocation policy) and how long you want to
-   keep the data (retention policy).
+**Media Pools:**
+   A media pool is a logical container for tapes. A backup job targets
+   one media pool, so a job only uses tapes from that pool. The pool
+   additionally defines how long a backup job can append data to tapes
+   (allocation policy) and how long you want to keep the data
+   (retention policy).

-:Media Set: A group of continuously written tapes (all from the same
-   media pool).
+**Media Set:**
+   A group of continuously written tapes (all from the same media pool).

-:Tape drive: The device used to read and write data to the tape. There
-   are standalone drives, but drives are usually shipped within tape libraries.
+**Tape drive:**
+   The device used to read and write data to the tape. There are
+   standalone drives, but drives are usually shipped within tape
+   libraries.

-:Tape changer: A device which can change the tapes inside a tape drive
-   (tape robot). They are usually part of a tape library.
+**Tape changer:**
+   A device which can change the tapes inside a tape drive (tape
+   robot). They are usually part of a tape library.

 .. _Tape Library: https://en.wikipedia.org/wiki/Tape_library

-:`Tape library`_: A storage device that contains one or more tape drives,
-   a number of slots to hold tape cartridges, a barcode reader to
-   identify tape cartridges, and an automated method for loading tapes
-   (a robot).
+`Tape library`_:
+   A storage device that contains one or more tape drives, a number of
+   slots to hold tape cartridges, a barcode reader to identify tape
+   cartridges, and an automated method for loading tapes (a robot).

-   This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.
+   This is also commonly known as an 'autoloader', 'tape robot' or
+   'tape jukebox'.

-:Inventory: The inventory stores the list of known tapes (with
-   additional status information).
+**Inventory:**
+   The inventory stores the list of known tapes (with additional status
+   information).

-:Catalog: A media catalog stores information about the media content.
+**Catalog:**
+   A media catalog stores information about the media content.


 Tape Quick Start
----------------
+----------------

 1. Configure your tape hardware (drives and changers)
@@ -176,8 +190,15 @@ same configuration.
 Tape changers
 ~~~~~~~~~~~~~

-Tape changers (robots) are part of a `Tape Library`_. You can skip
-this step if you are using a standalone drive.
+.. image:: images/screenshots/pbs-gui-tape-changers.png
+   :align: right
+   :alt: Tape Backup: Tape Changers
+
+Tape changers (robots) are part of a `Tape Library`_. They contain a number of
+slots to hold tape cartridges, a barcode reader to identify tape cartridges and
+an automated method for loading tapes.
+
+You can skip this step if you are using a standalone drive.

 Linux is able to auto detect these devices, and you can get a list
 of available devices using:
@@ -204,6 +225,13 @@ Where ``sl3`` is an arbitrary name you can choose.
    ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
    different device after reboot, and that is not what you want.

+.. image:: images/screenshots/pbs-gui-tape-changers-add.png
+   :align: right
+   :alt: Tape Backup: Add a new tape changer
+
+This operation can also be carried out from the GUI, by navigating to the
+**Changers** tab of **Tape Backup** and clicking **Add**.
+
 You can display the final configuration with:

 .. code-block:: console
@@ -217,7 +245,8 @@ You can display the final configuration with:
    │ path │ /dev/tape/by-id/scsi-CC2C52 │
    └──────┴─────────────────────────────┘

-Or simply list all configured changer devices:
+Or simply list all configured changer devices (as seen in the **Changers** tab
+of the GUI):

 .. code-block:: console
@@ -228,7 +257,7 @@ Or simply list all configured changer devices:
    │ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
    └──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘

-The Vendor, Model and Serial number are auto detected, but only shown
+The Vendor, Model and Serial number are auto-detected, but only shown
 if the device is online.

 To test your setup, please query the status of the changer device with:
@@ -261,12 +290,12 @@ It's worth noting that some of the smaller tape libraries don't have
 such slots. While they have something called a "Mail Slot", that slot
 is just a way to grab the tape from the gripper. They are unable
 to hold media while the robot does other things. They also do not
-expose that "Mail Slot" over the SCSI interface, so you wont see them in
+expose that "Mail Slot" over the SCSI interface, so you won't see them in
 the status output.

 As a workaround, you can mark some of the normal slots as export
-slot. The software treats those slots like real ``import-export``
-slots, and the media inside those slots is considered to be 'offline'
+slots. The software treats those slots like real ``import-export``
+slots, and the media inside those slots are considered to be 'offline'
 (not available for backup):

 .. code-block:: console
@@ -302,6 +331,10 @@ the status output:
 Tape drives
 ~~~~~~~~~~~

+.. image:: images/screenshots/pbs-gui-tape-drives.png
+   :align: right
+   :alt: Tape Backup: Drive list
+
 Linux is able to auto detect tape drives, and you can get a list
 of available tape drives using:
@@ -311,18 +344,23 @@ of available tape drives using:
    ┌────────────────────────────────┬────────┬─────────────┬────────┐
    │ path                           │ vendor │ model       │ serial │
    ╞════════════════════════════════╪════════╪═════════════╪════════╡
-   │ /dev/tape/by-id/scsi-12345-nst │ IBM    │ ULT3580-TD4 │ 12345  │
+   │ /dev/tape/by-id/scsi-12345-sg  │ IBM    │ ULT3580-TD4 │ 12345  │
    └────────────────────────────────┴────────┴─────────────┴────────┘

+.. image:: images/screenshots/pbs-gui-tape-drives-add.png
+   :align: right
+   :alt: Tape Backup: Add a tape drive
+
 In order to use that drive with Proxmox, you need to create a
-configuration entry:
+configuration entry. This can be done through **Tape Backup -> Drives** in the
+GUI or by using the command below:

 .. code-block:: console

-   # proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
+   # proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-sg

 .. Note:: Please use the persistent device path names from inside
-   ``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
+   ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
    different device after reboot, and that is not what you want.

 If you have a tape library, you also need to set the associated
@@ -346,7 +384,7 @@ You can display the final configuration with:
    ╞═════════╪════════════════════════════════╡
    │ name    │ mydrive                        │
    ├─────────┼────────────────────────────────┤
-   │ path    │ /dev/tape/by-id/scsi-12345-nst │
+   │ path    │ /dev/tape/by-id/scsi-12345-sg  │
    ├─────────┼────────────────────────────────┤
    │ changer │ sl3                            │
    └─────────┴────────────────────────────────┘
@@ -362,10 +400,10 @@ To list all configured drives use:
    ┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
    │ name     │ path                           │ changer │ vendor │ model       │ serial │
    ╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
-   │ mydrive  │ /dev/tape/by-id/scsi-12345-nst │ sl3     │ IBM    │ ULT3580-TD4 │ 12345  │
+   │ mydrive  │ /dev/tape/by-id/scsi-12345-sg  │ sl3     │ IBM    │ ULT3580-TD4 │ 12345  │
    └──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘

-The Vendor, Model and Serial number are auto detected, but only shown
+The Vendor, Model and Serial number are auto detected and only shown
 if the device is online.

 For testing, you can simply query the drive status with:
@@ -373,13 +411,35 @@ For testing, you can simply query the drive status with:
 .. code-block:: console

    # proxmox-tape status --drive mydrive
-   ┌───────────┬────────────────────────┐
-   │ Name      │ Value                  │
-   ╞═══════════╪════════════════════════╡
-   │ blocksize │ 0                      │
-   ├───────────┼────────────────────────┤
-   │ status    │ DRIVE_OPEN | IM_REP_EN │
-   └───────────┴────────────────────────┘
+   ┌────────────────┬──────────────────────────┐
+   │ Name           │ Value                    │
+   ╞════════════════╪══════════════════════════╡
+   │ blocksize      │ 0                        │
+   ├────────────────┼──────────────────────────┤
+   │ density        │ LTO4                     │
+   ├────────────────┼──────────────────────────┤
+   │ compression    │ 1                        │
+   ├────────────────┼──────────────────────────┤
+   │ buffer-mode    │ 1                        │
+   ├────────────────┼──────────────────────────┤
+   │ alert-flags    │ (empty)                  │
+   ├────────────────┼──────────────────────────┤
+   │ file-number    │ 0                        │
+   ├────────────────┼──────────────────────────┤
+   │ block-number   │ 0                        │
+   ├────────────────┼──────────────────────────┤
+   │ manufactured   │ Fri Dec 13 01:00:00 2019 │
+   ├────────────────┼──────────────────────────┤
+   │ bytes-written  │ 501.80 GiB               │
+   ├────────────────┼──────────────────────────┤
+   │ bytes-read     │ 4.00 MiB                 │
+   ├────────────────┼──────────────────────────┤
+   │ medium-passes  │ 20                       │
+   ├────────────────┼──────────────────────────┤
+   │ medium-wearout │ 0.12%                    │
+   ├────────────────┼──────────────────────────┤
+   │ volume-mounts  │ 2                        │
+   └────────────────┴──────────────────────────┘

 .. NOTE:: Blocksize should always be 0 (variable block size
    mode). This is the default anyway.
|
|||||||
Media Pools
|
Media Pools
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-pools.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Media Pools
|
||||||
|
|
||||||
A media pool is a logical container for tapes. A backup job targets
|
A media pool is a logical container for tapes. A backup job targets
|
||||||
one media pool, so a job only uses tapes from that pool.
|
a single media pool, so a job only uses tapes from that pool.
|
||||||
|
|
||||||
.. topic:: Media Set
|
.. topic:: Media Set
|
||||||
|
|
||||||
@@ -411,7 +475,7 @@ one media pool, so a job only uses tapes from that pool.
    The pool additionally defines how long backup jobs can append data
    to a media set. The following settings are possible:

-   - Try to use the current media set.
+   - Try to use the current media set (``continue``).

      This setting produces one large media set. While this is very
      space efficient (deduplication, no unused space), it can lead to
|
|||||||
.. NOTE:: Retention period starts with the existence of a newer
|
.. NOTE:: Retention period starts with the existence of a newer
|
||||||
media set.
|
media set.
|
||||||
|
|
||||||
- Always create a new media set.
|
- Always create a new media set (``always``).
|
||||||
|
|
||||||
With this setting, each backup job creates a new media set. This
|
With this setting, each backup job creates a new media set. This
|
||||||
is less space efficient, because the media from the last set
|
is less space efficient, because the media from the last set
|
||||||
@ -510,8 +574,12 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
if the sources are from different namespaces with conflicting names
|
if the sources are from different namespaces with conflicting names
|
||||||
(for example, if the sources are from different Proxmox VE clusters).
|
(for example, if the sources are from different Proxmox VE clusters).
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-pools-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Add a media pool
|
||||||
|
|
||||||
The following command creates a new media pool:
|
To create a new media pool, add one from **Tape Backup -> Media Pools** in the
|
||||||
|
GUI, or enter the following command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@@ -520,7 +588,7 @@ The following command creates a new media pool:

    # proxmox-tape pool create daily --drive mydrive

-Additional option can be set later, using the update command:
+Additional options can be set later, using the update command:

 .. code-block:: console
@@ -543,6 +611,10 @@ To list all configured pools use:
 Tape Backup Jobs
 ~~~~~~~~~~~~~~~~

+.. image:: images/screenshots/pbs-gui-tape-backup-jobs.png
+   :align: right
+   :alt: Tape Backup: Tape Backup Jobs
+
 To automate tape backup, you can configure tape backup jobs which
 write datastore content to a media pool, based on a specific time schedule.
 The required settings are:
@@ -618,6 +690,14 @@ To remove a job, please use:

    # proxmox-tape backup-job remove job2

+.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
+   :align: right
+   :alt: Tape Backup: Add a backup job
+
+This same functionality also exists in the GUI, under the **Backup Jobs** tab of
+**Tape Backup**, where *Local Datastore* relates to the datastore you want to
+back up and *Media Pool* is the pool to back up to.
+

 Administration
 --------------
@@ -633,7 +713,7 @@ variable:

 You can then omit the ``--drive`` parameter from the command. If the
 drive has an associated changer device, you may also omit the changer
-parameter from commands that needs a changer device, for example:
+parameter from commands that need a changer device, for example:

 .. code-block:: console
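For interactive sessions, the pattern this hunk describes is simply an exported
shell variable followed by drive commands without ``--drive`` (a sketch; the
``PROXMOX_TAPE_DRIVE`` variable name is an assumption, the surrounding context
only says "variable"):

.. code-block:: console

   # export PROXMOX_TAPE_DRIVE=mydrive
   # proxmox-tape status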
@@ -707,7 +787,7 @@ can then label all unlabeled tapes with a single command:
 Run Tape Backups
 ~~~~~~~~~~~~~~~~

-To manually run a backup job use:
+To manually run a backup job, click *Run Now* in the GUI or use the command:

 .. code-block:: console
@@ -772,7 +852,14 @@ Restore Catalog
 Encryption Key Management
 ~~~~~~~~~~~~~~~~~~~~~~~~~

-Creating a new encryption key:
+.. image:: images/screenshots/pbs-gui-tape-crypt-keys.png
+   :align: right
+   :alt: Tape Backup: Encryption Keys
+
+Proxmox Backup Server also provides an interface for handling encryption keys on
+the backup server. Encryption keys can be managed from the **Tape Backup ->
+Encryption Keys** section of the GUI or through the ``proxmox-tape key`` command
+line tool. To create a new encryption key from the command line:

 .. code-block:: console
@@ -883,78 +970,3 @@ This command does the following:
 - run drive cleaning operation

 - unload the cleaning tape (to slot 3)
-
-
-Configuration Files
--------------------
-
-``media-pool.cfg``
-~~~~~~~~~~~~~~~~~~
-
-File Format
-^^^^^^^^^^^
-
-.. include:: config/media-pool/format.rst
-
-
-Options
-^^^^^^^
-
-.. include:: config/media-pool/config.rst
-
-
-``tape.cfg``
-~~~~~~~~~~~~
-
-File Format
-^^^^^^^^^^^
-
-.. include:: config/tape/format.rst
-
-
-Options
-^^^^^^^
-
-.. include:: config/tape/config.rst
-
-
-``tape-job.cfg``
-~~~~~~~~~~~~~~~~
-
-File Format
-^^^^^^^^^^^
-
-.. include:: config/tape-job/format.rst
-
-
-Options
-^^^^^^^
-
-.. include:: config/tape-job/config.rst
-
-
-
-Command Syntax
---------------
-
-``proxmox-tape``
-----------------
-
-.. include:: proxmox-tape/synopsis.rst
-
-
-``pmt``
--------
-
-.. include:: pmt/options.rst
-
-....
-
-.. include:: pmt/synopsis.rst
-
-
-``pmtx``
---------
-
-.. include:: pmtx/synopsis.rst
@@ -100,7 +100,7 @@ can be encrypted, and they are handled in a slightly different manner than
 normal chunks.

 The hashes of encrypted chunks are calculated not with the actual (encrypted)
-chunk content, but with the plaintext content concatenated with the encryption
+chunk content, but with the plain-text content concatenated with the encryption
 key. This way, two chunks of the same data encrypted with different keys
 generate two different checksums and no collisions occur for multiple
 encryption keys.
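The effect is easy to reproduce conceptually: hashing the same plain-text
content together with two different keys yields two unrelated digests. A rough
illustration with generic shell tools (purely conceptual; the chunk and key
file names, and the use of plain SHA-256, are assumptions, not the server's
actual on-disk procedure):

.. code-block:: console

   # cat chunk.raw key-a.bin | sha256sum
   # cat chunk.raw key-b.bin | sha256sum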
@@ -138,7 +138,7 @@ will see that the probability of a collision in that scenario is:

 For context, in a lottery game of guessing 6 out of 45, the chance to correctly
 guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
-collission is about the same as winning 13 such lotto games *in a row*.
+a collision is about the same as winning 13 such lotto games *in a row*.

 In conclusion, it is extremely unlikely that such a collision would occur by
 accident in a normal datastore.
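As a quick sanity check of that comparison: winning 13 such independent games in
a row has probability :math:`(1.2277 \cdot 10^{-7})^{13} \approx 1.4 \cdot 10^{-90}`,
which is the order of magnitude being claimed for the collision chance.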
@@ -360,7 +360,9 @@ WebAuthn
 For WebAuthn to work, you need to have two things:

 * a trusted HTTPS certificate (for example, by using `Let's Encrypt
-  <https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
+  <https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
+  While it probably works with an untrusted certificate, some browsers may warn
+  or refuse WebAuthn operations if it is not trusted.

 * setup the WebAuthn configuration (see *Configuration -> Authentication* in the
   Proxmox Backup Server web-interface). This can be auto-filled in most setups.
684
src/acme/client.rs
Normal file
@ -0,0 +1,684 @@
//! HTTP Client for the ACME protocol.

use std::fs::OpenOptions;
use std::io;
use std::os::unix::fs::OpenOptionsExt;

use anyhow::{bail, format_err};
use bytes::Bytes;
use hyper::{Body, Request};
use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize};

use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox_acme_rs::account::AccountCreator;
use proxmox_acme_rs::account::AccountData as AcmeAccountData;
use proxmox_acme_rs::order::{Order, OrderData};
use proxmox_acme_rs::Request as AcmeRequest;
use proxmox_acme_rs::{Account, Authorization, Challenge, Directory, Error, ErrorResponse};
use proxmox_http::client::SimpleHttp;

use crate::api2::types::AcmeAccountName;
use crate::config::acme::account_path;
use crate::tools::pbs_simple_http;

/// Our on-disk format inherited from PVE's proxmox-acme code.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AccountData {
    /// The account's location URL.
    location: String,

    /// The account data.
    account: AcmeAccountData,

    /// The private key as PEM formatted string.
    key: String,

    /// ToS URL the user agreed to.
    #[serde(skip_serializing_if = "Option::is_none")]
    tos: Option<String>,

    #[serde(skip_serializing_if = "is_false", default)]
    debug: bool,

    /// The directory's URL.
    directory_url: String,
}

#[inline]
fn is_false(b: &bool) -> bool {
    !*b
}

pub struct AcmeClient {
    directory_url: String,
    debug: bool,
    account_path: Option<String>,
    tos: Option<String>,
    account: Option<Account>,
    directory: Option<Directory>,
    nonce: Option<String>,
    http_client: SimpleHttp,
}

impl AcmeClient {
    /// Create a new ACME client for a given ACME directory URL.
    pub fn new(directory_url: String) -> Self {
        Self {
            directory_url,
            debug: false,
            account_path: None,
            tos: None,
            account: None,
            directory: None,
            nonce: None,
            http_client: pbs_simple_http(None),
        }
    }

    /// Load an existing ACME account by name.
    pub async fn load(account_name: &AcmeAccountName) -> Result<Self, anyhow::Error> {
        let account_path = account_path(account_name.as_ref());
        let data = match tokio::fs::read(&account_path).await {
            Ok(data) => data,
            Err(err) if err.kind() == io::ErrorKind::NotFound => {
                bail!("acme account '{}' does not exist", account_name)
            }
            Err(err) => bail!(
                "failed to load acme account from '{}' - {}",
                account_path,
                err
            ),
        };
        let data: AccountData = serde_json::from_slice(&data).map_err(|err| {
            format_err!(
                "failed to parse acme account from '{}' - {}",
                account_path,
                err
            )
        })?;

        let account = Account::from_parts(data.location, data.key, data.account);

        let mut me = Self::new(data.directory_url);
        me.debug = data.debug;
        me.account_path = Some(account_path);
        me.tos = data.tos;
        me.account = Some(account);

        Ok(me)
    }

    pub async fn new_account<'a>(
        &'a mut self,
        account_name: &AcmeAccountName,
        tos_agreed: bool,
        contact: Vec<String>,
        rsa_bits: Option<u32>,
    ) -> Result<&'a Account, anyhow::Error> {
        self.tos = if tos_agreed {
            self.terms_of_service_url().await?.map(str::to_owned)
        } else {
            None
        };

        let account = Account::creator()
            .set_contacts(contact)
            .agree_to_tos(tos_agreed);

        let account = if let Some(bits) = rsa_bits {
            account.generate_rsa_key(bits)?
        } else {
            account.generate_ec_key()?
        };

        let _ = self.register_account(account).await?;

        crate::config::acme::make_acme_account_dir()?;
        let account_path = account_path(account_name.as_ref());
        let file = OpenOptions::new()
            .write(true)
            .create(true)
            .mode(0o600)
            .open(&account_path)
            .map_err(|err| format_err!("failed to open {:?} for writing: {}", account_path, err))?;
        self.write_to(file).map_err(|err| {
            format_err!(
                "failed to write acme account to {:?}: {}",
                account_path,
                err
            )
        })?;
        self.account_path = Some(account_path);

        // unwrap: Setting `self.account` is literally this function's job, we just can't keep
        // the borrow from `self.register_account()` active due to clashes.
        Ok(self.account.as_ref().unwrap())
    }

    fn save(&self) -> Result<(), anyhow::Error> {
        let mut data = Vec::<u8>::new();
        self.write_to(&mut data)?;
        let account_path = self.account_path.as_ref().ok_or_else(|| {
            format_err!("no account path set, cannot save updated account information")
        })?;
        crate::config::acme::make_acme_account_dir()?;
        replace_file(
            account_path,
            &data,
            CreateOptions::new()
                .perm(Mode::from_bits_truncate(0o600))
                .owner(nix::unistd::ROOT)
                .group(nix::unistd::Gid::from_raw(0)),
        )
    }

    /// Shortcut to `account().ok_or_else(...).key_authorization()`.
    pub fn key_authorization(&self, token: &str) -> Result<String, anyhow::Error> {
        Ok(Self::need_account(&self.account)?.key_authorization(token)?)
    }

    /// Shortcut to `account().ok_or_else(...).dns_01_txt_value()`, returning
    /// the key authorization value.
    pub fn dns_01_txt_value(&self, token: &str) -> Result<String, anyhow::Error> {
        Ok(Self::need_account(&self.account)?.dns_01_txt_value(token)?)
    }

    async fn register_account(
        &mut self,
        account: AccountCreator,
    ) -> Result<&Account, anyhow::Error> {
        let mut retry = retry();
        let mut response = loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;
            let request = account.request(directory, nonce)?;
            match self.run_request(request).await {
                Ok(response) => break response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        };

        let account = account.response(response.location_required()?, &response.body)?;

        self.account = Some(account);
        Ok(self.account.as_ref().unwrap())
    }

    pub async fn update_account<T: Serialize>(
        &mut self,
        data: &T,
    ) -> Result<&Account, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        let response = loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.post_request(&account.location, &nonce, data)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => break response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        };

        // unwrap: we've been keeping an immutable reference to it from the top of the method
        let _ = account;
        self.account.as_mut().unwrap().data = response.json()?;
        self.save()?;
        Ok(self.account.as_ref().unwrap())
    }

    pub async fn new_order<I>(&mut self, domains: I) -> Result<Order, anyhow::Error>
    where
        I: IntoIterator<Item = String>,
    {
        let account = Self::need_account(&self.account)?;

        let order = domains
            .into_iter()
            .fold(OrderData::new(), |order, domain| order.domain(domain));

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let mut new_order = account.new_order(&order, directory, nonce)?;
            let mut response = match Self::execute(
                &mut self.http_client,
                new_order.request.take().unwrap(),
                &mut self.nonce,
            )
            .await
            {
                Ok(response) => response,
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            };

            return Ok(
                new_order.response(response.location_required()?, response.bytes().as_ref())?
            );
        }
    }

    /// Low level "POST-as-GET" request.
    async fn post_as_get(&mut self, url: &str) -> Result<AcmeResponse, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.get_request(url, nonce)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => return Ok(response),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    /// Low level POST request.
    async fn post<T: Serialize>(
        &mut self,
        url: &str,
        data: &T,
    ) -> Result<AcmeResponse, anyhow::Error> {
        let account = Self::need_account(&self.account)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (_directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = account.post_request(url, nonce, data)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(response) => return Ok(response),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    /// Request challenge validation. Afterwards, the challenge should be polled.
    pub async fn request_challenge_validation(
        &mut self,
        url: &str,
    ) -> Result<Challenge, anyhow::Error> {
        Ok(self
            .post(url, &serde_json::Value::Object(Default::default()))
            .await?
            .json()?)
    }

    /// Assuming the provided URL is an 'Authorization' URL, get and deserialize it.
    pub async fn get_authorization(&mut self, url: &str) -> Result<Authorization, anyhow::Error> {
        Ok(self.post_as_get(url).await?.json()?)
    }

    /// Assuming the provided URL is an 'Order' URL, get and deserialize it.
    pub async fn get_order(&mut self, url: &str) -> Result<OrderData, anyhow::Error> {
        Ok(self.post_as_get(url).await?.json()?)
    }

    /// Finalize an Order via its `finalize` URL property and the DER encoded CSR.
    pub async fn finalize(&mut self, url: &str, csr: &[u8]) -> Result<(), anyhow::Error> {
        let csr = base64::encode_config(csr, base64::URL_SAFE_NO_PAD);
        let data = serde_json::json!({ "csr": csr });
        self.post(url, &data).await?;
        Ok(())
    }

    /// Download a certificate via its 'certificate' URL property.
    ///
    /// The certificate will be a PEM certificate chain.
    pub async fn get_certificate(&mut self, url: &str) -> Result<Bytes, anyhow::Error> {
        Ok(self.post_as_get(url).await?.body)
    }

    /// Revoke an existing certificate (PEM or DER formatted).
    pub async fn revoke_certificate(
        &mut self,
        certificate: &[u8],
        reason: Option<u32>,
    ) -> Result<(), anyhow::Error> {
        // TODO: This can also work without an account.
        let account = Self::need_account(&self.account)?;

        let revocation = account.revoke_certificate(certificate, reason)?;

        let mut retry = retry();
        loop {
            retry.tick()?;

            let (directory, nonce) = Self::get_dir_nonce(
                &mut self.http_client,
                &self.directory_url,
                &mut self.directory,
                &mut self.nonce,
            )
            .await?;

            let request = revocation.request(&directory, nonce)?;
            match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
                Ok(_response) => return Ok(()),
                Err(err) if err.is_bad_nonce() => continue,
                Err(err) => return Err(err.into()),
            }
        }
    }

    fn need_account(account: &Option<Account>) -> Result<&Account, anyhow::Error> {
        account
            .as_ref()
            .ok_or_else(|| format_err!("cannot use client without an account"))
    }

    pub(crate) fn account(&self) -> Result<&Account, anyhow::Error> {
        Self::need_account(&self.account)
    }

    pub fn tos(&self) -> Option<&str> {
        self.tos.as_deref()
    }

    pub fn directory_url(&self) -> &str {
        &self.directory_url
    }

    fn to_account_data(&self) -> Result<AccountData, anyhow::Error> {
        let account = self.account()?;

        Ok(AccountData {
            location: account.location.clone(),
            key: account.private_key.clone(),
            account: AcmeAccountData {
                only_return_existing: false, // don't actually write this out in case it's set
                ..account.data.clone()
            },
            tos: self.tos.clone(),
            debug: self.debug,
            directory_url: self.directory_url.clone(),
        })
    }

    fn write_to<T: io::Write>(&self, out: T) -> Result<(), anyhow::Error> {
        let data = self.to_account_data()?;

        Ok(serde_json::to_writer_pretty(out, &data)?)
    }
}

struct AcmeResponse {
    body: Bytes,
    location: Option<String>,
    got_nonce: bool,
}

impl AcmeResponse {
    /// Convenience helper to assert that a location header was part of the response.
    fn location_required(&mut self) -> Result<String, anyhow::Error> {
        self.location
            .take()
            .ok_or_else(|| format_err!("missing Location header"))
    }

    /// Convenience shortcut to perform json deserialization of the returned body.
    fn json<T: for<'a> Deserialize<'a>>(&self) -> Result<T, Error> {
        Ok(serde_json::from_slice(&self.body)?)
    }

    /// Convenience shortcut to get the body as bytes.
    fn bytes(&self) -> &[u8] {
        &self.body
    }
}

impl AcmeClient {
    /// Non-self-borrowing run_request version for borrow workarounds.
    async fn execute(
        http_client: &mut SimpleHttp,
        request: AcmeRequest,
        nonce: &mut Option<String>,
    ) -> Result<AcmeResponse, Error> {
        let req_builder = Request::builder().method(request.method).uri(&request.url);

        let http_request = if !request.content_type.is_empty() {
            req_builder
                .header("Content-Type", request.content_type)
                .header("Content-Length", request.body.len())
                .body(request.body.into())
        } else {
            req_builder.body(Body::empty())
        }
        .map_err(|err| Error::Custom(format!("failed to create http request: {}", err)))?;

        let response = http_client
            .request(http_request)
            .await
            .map_err(|err| Error::Custom(err.to_string()))?;
        let (parts, body) = response.into_parts();

        let status = parts.status.as_u16();
        let body = hyper::body::to_bytes(body)
            .await
            .map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?;

        let got_nonce = if let Some(new_nonce) = parts.headers.get(proxmox_acme_rs::REPLAY_NONCE) {
            let new_nonce = new_nonce.to_str().map_err(|err| {
                Error::Client(format!(
                    "received invalid replay-nonce header from ACME server: {}",
                    err
                ))
            })?;
            *nonce = Some(new_nonce.to_owned());
            true
        } else {
            false
        };

        if parts.status.is_success() {
            if status != request.expected {
                return Err(Error::InvalidApi(format!(
                    "ACME server responded with unexpected status code: {:?}",
                    parts.status
                )));
            }

            let location = parts
                .headers
                .get("Location")
                .map(|header| {
                    header.to_str().map(str::to_owned).map_err(|err| {
                        Error::Client(format!(
                            "received invalid location header from ACME server: {}",
                            err
                        ))
                    })
                })
                .transpose()?;

            return Ok(AcmeResponse {
                body,
                location,
                got_nonce,
            });
        }

        let error: ErrorResponse = serde_json::from_slice(&body).map_err(|err| {
            Error::Client(format!(
                "error status with improper error ACME response: {}",
                err
            ))
        })?;

        if error.ty == proxmox_acme_rs::error::BAD_NONCE {
            if !got_nonce {
                return Err(Error::InvalidApi(
                    "badNonce without a new Replay-Nonce header".to_string(),
                ));
            }
            return Err(Error::BadNonce);
        }

        Err(Error::Api(error))
    }

    /// Low-level API to run an API request. This automatically updates the current nonce!
    async fn run_request(&mut self, request: AcmeRequest) -> Result<AcmeResponse, Error> {
        Self::execute(&mut self.http_client, request, &mut self.nonce).await
    }

    async fn directory(&mut self) -> Result<&Directory, Error> {
        Ok(Self::get_directory(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?
        .0)
    }

    async fn get_directory<'a, 'b>(
        http_client: &mut SimpleHttp,
        directory_url: &str,
        directory: &'a mut Option<Directory>,
        nonce: &'b mut Option<String>,
    ) -> Result<(&'a Directory, Option<&'b str>), Error> {
        if let Some(d) = directory {
            return Ok((d, nonce.as_deref()));
        }

        let response = Self::execute(
            http_client,
            AcmeRequest {
                url: directory_url.to_string(),
                method: "GET",
                content_type: "",
                body: String::new(),
                expected: 200,
            },
            nonce,
        )
        .await?;

        *directory = Some(Directory::from_parts(
            directory_url.to_string(),
            response.json()?,
        ));

        Ok((directory.as_ref().unwrap(), nonce.as_deref()))
    }

    /// Like `get_directory`, but if the directory provides no nonce, also performs a `HEAD`
    /// request on the new nonce URL.
    async fn get_dir_nonce<'a, 'b>(
        http_client: &mut SimpleHttp,
        directory_url: &str,
        directory: &'a mut Option<Directory>,
        nonce: &'b mut Option<String>,
    ) -> Result<(&'a Directory, &'b str), Error> {
        // this let construct is a lifetime workaround:
        let _ = Self::get_directory(http_client, directory_url, directory, nonce).await?;
        let dir = directory.as_ref().unwrap(); // the above fails if it couldn't fill this option
        if nonce.is_none() {
            // this is also a lifetime issue...
            let _ = Self::get_nonce(http_client, nonce, dir.new_nonce_url()).await?;
        };
        Ok((dir, nonce.as_deref().unwrap()))
    }

    pub async fn terms_of_service_url(&mut self) -> Result<Option<&str>, Error> {
        Ok(self.directory().await?.terms_of_service_url())
    }

    async fn get_nonce<'a>(
        http_client: &mut SimpleHttp,
        nonce: &'a mut Option<String>,
        new_nonce_url: &str,
    ) -> Result<&'a str, Error> {
        let response = Self::execute(
            http_client,
            AcmeRequest {
                url: new_nonce_url.to_owned(),
                method: "HEAD",
                content_type: "",
                body: String::new(),
                expected: 200,
            },
            nonce,
        )
        .await?;

        if !response.got_nonce {
            return Err(Error::InvalidApi(
                "no new nonce received from new nonce URL".to_string(),
            ));
        }

        nonce
            .as_deref()
            .ok_or_else(|| Error::Client("failed to update nonce".to_string()))
    }
}

/// bad nonce retry count helper
struct Retry(usize);

const fn retry() -> Retry {
    Retry(0)
}

impl Retry {
    fn tick(&mut self) -> Result<(), Error> {
        if self.0 >= 3 {
            Err(Error::Client(format!("kept getting a badNonce error!")))
        } else {
            self.0 += 1;
            Ok(())
        }
    }
}
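For orientation, a rough usage sketch of the client above, not code from this changeset; the order/authorization field names are assumed from proxmox_acme_rs as this series uses it, and the account-name handling mirrors the API code further below:

// Hypothetical driver, assuming a tokio runtime and a registered account.
async fn issue_order_sketch() -> Result<(), anyhow::Error> {
    // "default" account name, constructed the same way the API code does it.
    let name = unsafe {
        crate::api2::types::AcmeAccountName::from_string_unchecked("default".to_string())
    };
    let mut client = AcmeClient::load(&name).await?;

    // An order yields authorization URLs; each must be validated by a
    // challenge plugin before finalize() can be called with a CSR.
    let order = client
        .new_order(vec!["backup.example.com".to_string()])
        .await?;
    for auth_url in &order.data.authorizations {
        let _authorization = client.get_authorization(auth_url).await?;
        // ... plugin setup, request_challenge_validation(), polling, teardown ...
    }
    Ok(())
}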
5  src/acme/mod.rs  Normal file
@@ -0,0 +1,5 @@
mod client;
pub use client::AcmeClient;

pub(crate) mod plugin;
pub(crate) use plugin::get_acme_plugin;
299  src/acme/plugin.rs  Normal file
@@ -0,0 +1,299 @@
use std::future::Future;
use std::pin::Pin;
use std::process::Stdio;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use hyper::{Body, Request, Response};
use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt, BufReader};
use tokio::process::Command;

use proxmox_acme_rs::{Authorization, Challenge};

use crate::acme::AcmeClient;
use crate::api2::types::AcmeDomain;
use crate::server::WorkerTask;

use crate::config::acme::plugin::{DnsPlugin, PluginData};

const PROXMOX_ACME_SH_PATH: &str = "/usr/share/proxmox-acme/proxmox-acme";

pub(crate) fn get_acme_plugin(
    plugin_data: &PluginData,
    name: &str,
) -> Result<Option<Box<dyn AcmePlugin + Send + Sync + 'static>>, Error> {
    let (ty, data) = match plugin_data.get(name) {
        Some(plugin) => plugin,
        None => return Ok(None),
    };

    Ok(Some(match ty.as_str() {
        "dns" => {
            let plugin: DnsPlugin = serde_json::from_value(data.clone())?;
            Box::new(plugin)
        }
        "standalone" => {
            // this one has no config
            Box::new(StandaloneServer::default())
        }
        other => bail!("missing implementation for plugin type '{}'", other),
    }))
}

pub(crate) trait AcmePlugin {
    /// Setup everything required to trigger the validation and return the corresponding validation
    /// URL.
    fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        domain: &'d AcmeDomain,
        task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>>;

    fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        domain: &'d AcmeDomain,
        task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>>;
}

fn extract_challenge<'a>(
    authorization: &'a Authorization,
    ty: &str,
) -> Result<&'a Challenge, Error> {
    authorization
        .challenges
        .iter()
        .find(|ch| ch.ty == ty)
        .ok_or_else(|| format_err!("no supported challenge type (dns-01) found"))
}

async fn pipe_to_tasklog<T: AsyncRead + Unpin>(
    pipe: T,
    task: Arc<WorkerTask>,
) -> Result<(), std::io::Error> {
    let mut pipe = BufReader::new(pipe);
    let mut line = String::new();
    loop {
        line.clear();
        match pipe.read_line(&mut line).await {
            Ok(0) => return Ok(()),
            Ok(_) => task.log(line.as_str()),
            Err(err) => return Err(err),
        }
    }
}

impl DnsPlugin {
    async fn action<'a>(
        &self,
        client: &mut AcmeClient,
        authorization: &'a Authorization,
        domain: &AcmeDomain,
        task: Arc<WorkerTask>,
        action: &str,
    ) -> Result<&'a str, Error> {
        let challenge = extract_challenge(authorization, "dns-01")?;
        let mut stdin_data = client
            .dns_01_txt_value(
                challenge
                    .token()
                    .ok_or_else(|| format_err!("missing token in challenge"))?,
            )?
            .into_bytes();
        stdin_data.push(b'\n');
        stdin_data.extend(self.data.as_bytes());
        if stdin_data.last() != Some(&b'\n') {
            stdin_data.push(b'\n');
        }

        let mut command = Command::new("/usr/bin/setpriv");

        #[rustfmt::skip]
        command.args(&[
            "--reuid", "nobody",
            "--regid", "nogroup",
            "--clear-groups",
            "--reset-env",
            "--",
            "/bin/bash",
            PROXMOX_ACME_SH_PATH,
            action,
            &self.core.api,
            domain.alias.as_deref().unwrap_or(&domain.domain),
        ]);

        // We could use 1 socketpair, but tokio wraps them all in `File` internally causing `close`
        // to be called separately on all of them without exception, so we need 3 pipes :-(

        let mut child = command
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        let mut stdin = child.stdin.take().expect("Stdio::piped()");
        let stdout = child.stdout.take().expect("Stdio::piped() failed?");
        let stdout = pipe_to_tasklog(stdout, Arc::clone(&task));
        let stderr = child.stderr.take().expect("Stdio::piped() failed?");
        let stderr = pipe_to_tasklog(stderr, Arc::clone(&task));
        let stdin = async move {
            stdin.write_all(&stdin_data).await?;
            stdin.flush().await?;
            Ok::<_, std::io::Error>(())
        };
        match futures::try_join!(stdin, stdout, stderr) {
            Ok(((), (), ())) => (),
            Err(err) => {
                if let Err(err) = child.kill().await {
                    task.log(format!(
                        "failed to kill '{} {}' command: {}",
                        PROXMOX_ACME_SH_PATH, action, err
                    ));
                }
                bail!("'{}' failed: {}", PROXMOX_ACME_SH_PATH, err);
            }
        }

        let status = child.wait().await?;
        if !status.success() {
            bail!(
                "'{} {}' exited with error ({})",
                PROXMOX_ACME_SH_PATH,
                action,
                status.code().unwrap_or(-1)
            );
        }

        Ok(&challenge.url)
    }
}

impl AcmePlugin for DnsPlugin {
    fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        domain: &'d AcmeDomain,
        task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
        Box::pin(self.action(client, authorization, domain, task, "setup"))
    }

    fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        domain: &'d AcmeDomain,
        task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
        Box::pin(async move {
            self.action(client, authorization, domain, task, "teardown")
                .await
                .map(drop)
        })
    }
}

#[derive(Default)]
struct StandaloneServer {
    abort_handle: Option<futures::future::AbortHandle>,
}

// In case the "order_certificates" future gets dropped between setup & teardown, let's also cancel
// the HTTP listener on Drop:
impl Drop for StandaloneServer {
    fn drop(&mut self) {
        self.stop();
    }
}

impl StandaloneServer {
    fn stop(&mut self) {
        if let Some(abort) = self.abort_handle.take() {
            abort.abort();
        }
    }
}

async fn standalone_respond(
    req: Request<Body>,
    path: Arc<String>,
    key_auth: Arc<String>,
) -> Result<Response<Body>, hyper::Error> {
    if req.method() == hyper::Method::GET && req.uri().path() == path.as_str() {
        Ok(Response::builder()
            .status(http::StatusCode::OK)
            .body(key_auth.as_bytes().to_vec().into())
            .unwrap())
    } else {
        Ok(Response::builder()
            .status(http::StatusCode::NOT_FOUND)
            .body("Not found.".into())
            .unwrap())
    }
}

impl AcmePlugin for StandaloneServer {
    fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        client: &'b mut AcmeClient,
        authorization: &'c Authorization,
        _domain: &'d AcmeDomain,
        _task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
        use hyper::server::conn::AddrIncoming;
        use hyper::service::{make_service_fn, service_fn};

        Box::pin(async move {
            self.stop();

            let challenge = extract_challenge(authorization, "http-01")?;
            let token = challenge
                .token()
                .ok_or_else(|| format_err!("missing token in challenge"))?;
            let key_auth = Arc::new(client.key_authorization(&token)?);
            let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));

            let service = make_service_fn(move |_| {
                let path = Arc::clone(&path);
                let key_auth = Arc::clone(&key_auth);
                async move {
                    Ok::<_, hyper::Error>(service_fn(move |request| {
                        standalone_respond(request, Arc::clone(&path), Arc::clone(&key_auth))
                    }))
                }
            });

            // `[::]:80` first, then `*:80`
            let incoming = AddrIncoming::bind(&(([0u16; 8], 80).into()))
                .or_else(|_| AddrIncoming::bind(&(([0u8; 4], 80).into())))?;

            let server = hyper::Server::builder(incoming).serve(service);

            let (future, abort) = futures::future::abortable(server);
            self.abort_handle = Some(abort);
            tokio::spawn(future);

            Ok(challenge.url.as_str())
        })
    }

    fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
        &'a mut self,
        _client: &'b mut AcmeClient,
        _authorization: &'c Authorization,
        _domain: &'d AcmeDomain,
        _task: Arc<WorkerTask>,
    ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
        Box::pin(async move {
            if let Some(abort) = self.abort_handle.take() {
                abort.abort();
            }
            Ok(())
        })
    }
}
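A note on the `AcmePlugin` trait above: it hand-desugars what `async fn` in traits would provide, boxing the returned future and tying every borrowed parameter's lifetime to it. A self-contained sketch of the same pattern, with purely hypothetical names:

use std::future::Future;
use std::pin::Pin;

// Hypothetical trait, for illustration: each borrow ('a) must outlive the
// returned future ('fut), exactly as in AcmePlugin::setup/teardown.
trait Validate {
    fn validate<'fut, 'a: 'fut>(
        &'a mut self,
        input: &'a str,
    ) -> Pin<Box<dyn Future<Output = bool> + Send + 'fut>>;
}

struct Accept;

impl Validate for Accept {
    fn validate<'fut, 'a: 'fut>(
        &'a mut self,
        input: &'a str,
    ) -> Pin<Box<dyn Future<Output = bool> + Send + 'fut>> {
        let ok = !input.is_empty();
        Box::pin(async move { ok })
    }
}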
@@ -12,7 +12,7 @@ pub mod version;
 pub mod ping;
 pub mod pull;
 pub mod tape;
-mod helpers;
+pub mod helpers;
 
 use proxmox::api::router::SubdirMap;
 use proxmox::api::Router;
@@ -18,8 +18,7 @@ use crate::api2::types::*;
         description: "User configuration (without password).",
         properties: {
             realm: {
-                description: "Realm ID.",
-                type: String,
+                schema: REALM_ID_SCHEMA,
             },
             comment: {
                 schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -477,6 +477,17 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
 
     user::save_config(&config)?;
 
+    let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
+    match authenticator.remove_password(userid.name()) {
+        Ok(()) => {},
+        Err(err) => {
+            eprintln!(
+                "error removing password after deleting user {:?}: {}",
+                userid, err
+            );
+        }
+    }
+
     match crate::config::tfa::read().and_then(|mut cfg| {
         let _: bool = cfg.remove_user(&userid);
         crate::config::tfa::write(&cfg)
@@ -219,6 +219,48 @@ pub fn list_groups(
     Ok(group_info)
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(
+            &["datastore", "{store}"],
+            PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+            true),
+    },
+)]
+/// Delete backup group including all snapshots.
+pub fn delete_group(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+    let group = BackupGroup::new(backup_type, backup_id);
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
+
+    datastore.remove_backup_group(&group)?;
+
+    Ok(Value::Null)
+}
+
 #[api(
     input: {
         properties: {
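The handler above defers to check_priv_or_backup_owner for authorization; as a hedged standalone sketch of that owner-or-privilege pattern (hypothetical helper, not the function from this tree):

// Either the caller holds the required privilege bits on the datastore
// path, or the caller owns the backup group being modified.
fn may_modify_group(owner: &str, caller: &str, caller_privs: u64, required: u64) -> bool {
    (caller_privs & required) == required || owner == caller
}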
@@ -1140,7 +1182,7 @@ pub fn download_file_decoded(
     manifest.verify_file(&file_name, &csum, size)?;
 
     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
-    let reader = AsyncIndexReader::new(index, chunk_reader);
+    let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
     Body::wrap_stream(AsyncReaderStream::new(reader)
         .map_err(move |err| {
             eprintln!("error during streaming of '{:?}' - {}", path, err);
@@ -1155,7 +1197,7 @@ pub fn download_file_decoded(
     manifest.verify_file(&file_name, &csum, size)?;
 
     let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
-    let reader = AsyncIndexReader::new(index, chunk_reader);
+    let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
     Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
         .map_err(move |err| {
             eprintln!("error during streaming of '{:?}' - {}", path, err);
@@ -1385,7 +1427,7 @@ pub fn pxar_file_download(
 
     let mut split = components.splitn(2, |c| *c == b'/');
     let pxar_name = std::str::from_utf8(split.next().unwrap())?;
-    let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
+    let file_path = split.next().unwrap_or(b"/");
     let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
     for file in files {
         if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
@@ -1722,6 +1764,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         "groups",
         &Router::new()
             .get(&API_METHOD_LIST_GROUPS)
+            .delete(&API_METHOD_DELETE_GROUP)
     ),
     (
         "notes",
@@ -4,6 +4,7 @@ use proxmox::api::router::{Router, SubdirMap};
 use proxmox::list_subdirs_api_method;
 
 pub mod access;
+pub mod acme;
 pub mod datastore;
 pub mod remote;
 pub mod sync;
@@ -16,6 +17,7 @@ pub mod tape_backup_job;
 
 const SUBDIRS: SubdirMap = &[
     ("access", &access::ROUTER),
+    ("acme", &acme::ROUTER),
     ("changer", &changer::ROUTER),
     ("datastore", &datastore::ROUTER),
     ("drive", &drive::ROUTER),
727  src/api2/config/acme.rs  Normal file
@@ -0,0 +1,727 @@
use std::fs;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

use proxmox::api::router::SubdirMap;
use proxmox::api::schema::Updatable;
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::http_bail;
use proxmox::list_subdirs_api_method;

use proxmox_acme_rs::account::AccountData as AcmeAccountData;
use proxmox_acme_rs::Account;

use crate::acme::AcmeClient;
use crate::api2::types::{AcmeAccountName, AcmeChallengeSchema, Authid, KnownAcmeDirectory};
use crate::config::acl::PRIV_SYS_MODIFY;
use crate::config::acme::plugin::{
    DnsPlugin, DnsPluginCore, DnsPluginCoreUpdater, PLUGIN_ID_SCHEMA,
};
use crate::server::WorkerTask;
use crate::tools::ControlFlow;

pub(crate) const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

const SUBDIRS: SubdirMap = &[
    (
        "account",
        &Router::new()
            .get(&API_METHOD_LIST_ACCOUNTS)
            .post(&API_METHOD_REGISTER_ACCOUNT)
            .match_all("name", &ACCOUNT_ITEM_ROUTER),
    ),
    (
        "challenge-schema",
        &Router::new().get(&API_METHOD_GET_CHALLENGE_SCHEMA),
    ),
    (
        "directories",
        &Router::new().get(&API_METHOD_GET_DIRECTORIES),
    ),
    (
        "plugins",
        &Router::new()
            .get(&API_METHOD_LIST_PLUGINS)
            .post(&API_METHOD_ADD_PLUGIN)
            .match_all("id", &PLUGIN_ITEM_ROUTER),
    ),
    ("tos", &Router::new().get(&API_METHOD_GET_TOS)),
];

const ACCOUNT_ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_ACCOUNT)
    .put(&API_METHOD_UPDATE_ACCOUNT)
    .delete(&API_METHOD_DEACTIVATE_ACCOUNT);

const PLUGIN_ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_PLUGIN)
    .put(&API_METHOD_UPDATE_PLUGIN)
    .delete(&API_METHOD_DELETE_PLUGIN);

#[api(
    properties: {
        name: { type: AcmeAccountName },
    },
)]
/// An ACME Account entry.
///
/// Currently only contains a 'name' property.
#[derive(Serialize)]
pub struct AccountEntry {
    name: AcmeAccountName,
}

#[api(
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: AccountEntry },
        description: "List of ACME accounts.",
    },
    protected: true,
)]
/// List ACME accounts.
pub fn list_accounts() -> Result<Vec<AccountEntry>, Error> {
    let mut entries = Vec::new();
    crate::config::acme::foreach_acme_account(|name| {
        entries.push(AccountEntry { name });
        ControlFlow::Continue(())
    })?;
    Ok(entries)
}

#[api(
    properties: {
        account: { type: Object, properties: {}, additional_properties: true },
        tos: {
            type: String,
            optional: true,
        },
    },
)]
/// ACME Account information.
///
/// This is what we return via the API.
#[derive(Serialize)]
pub struct AccountInfo {
    /// Raw account data.
    account: AcmeAccountData,

    /// The ACME directory URL the account was created at.
    directory: String,

    /// The account's own URL within the ACME directory.
    location: String,

    /// The ToS URL, if the user agreed to one.
    #[serde(skip_serializing_if = "Option::is_none")]
    tos: Option<String>,
}

#[api(
    input: {
        properties: {
            name: { type: AcmeAccountName },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: { type: AccountInfo },
    protected: true,
)]
/// Return existing ACME account information.
pub async fn get_account(name: AcmeAccountName) -> Result<AccountInfo, Error> {
    let client = AcmeClient::load(&name).await?;
    let account = client.account()?;
    Ok(AccountInfo {
        location: account.location.clone(),
        tos: client.tos().map(str::to_owned),
        directory: client.directory_url().to_owned(),
        account: AcmeAccountData {
            only_return_existing: false, // don't actually write this out in case it's set
            ..account.data.clone()
        },
    })
}

fn account_contact_from_string(s: &str) -> Vec<String> {
    s.split(&[' ', ';', ',', '\0'][..])
        .map(|s| format!("mailto:{}", s))
        .collect()
}
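One detail worth flagging in account_contact_from_string: `split` keeps empty fragments, so a doubled or trailing separator would yield a bare `mailto:` entry. A quick illustration of the intended case:

// Illustrative only; mirrors the function body above.
fn demo() {
    let contacts: Vec<String> = "admin@example.com;ops@example.com"
        .split(&[' ', ';', ',', '\0'][..])
        .map(|s| format!("mailto:{}", s))
        .collect();
    assert_eq!(
        contacts,
        vec!["mailto:admin@example.com", "mailto:ops@example.com"]
    );
}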
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
type: AcmeAccountName,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
contact: {
|
||||||
|
description: "List of email addresses.",
|
||||||
|
},
|
||||||
|
tos_url: {
|
||||||
|
description: "URL of CA TermsOfService - setting this indicates agreement.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
directory: {
|
||||||
|
type: String,
|
||||||
|
description: "The ACME Directory.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
protected: true,
|
||||||
|
)]
|
||||||
|
/// Register an ACME account.
|
||||||
|
fn register_account(
|
||||||
|
name: Option<AcmeAccountName>,
|
||||||
|
// Todo: email & email-list schema
|
||||||
|
contact: String,
|
||||||
|
tos_url: Option<String>,
|
||||||
|
directory: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
let name = name.unwrap_or_else(|| unsafe {
|
||||||
|
AcmeAccountName::from_string_unchecked("default".to_string())
|
||||||
|
});
|
||||||
|
|
||||||
|
if Path::new(&crate::config::acme::account_path(&name)).exists() {
|
||||||
|
http_bail!(BAD_REQUEST, "account {} already exists", name);
|
||||||
|
}
|
||||||
|
|
||||||
|
let directory = directory.unwrap_or_else(|| {
|
||||||
|
crate::config::acme::DEFAULT_ACME_DIRECTORY_ENTRY
|
||||||
|
.url
|
||||||
|
.to_owned()
|
||||||
|
});
|
||||||
|
|
||||||
|
WorkerTask::spawn(
|
||||||
|
"acme-register",
|
||||||
|
Some(name.to_string()),
|
||||||
|
auth_id,
|
||||||
|
true,
|
||||||
|
move |worker| async move {
|
||||||
|
let mut client = AcmeClient::new(directory);
|
||||||
|
|
||||||
|
worker.log(format!("Registering ACME account '{}'...", &name));
|
||||||
|
|
||||||
|
let account =
|
||||||
|
do_register_account(&mut client, &name, tos_url.is_some(), contact, None).await?;
|
||||||
|
|
||||||
|
worker.log(format!(
|
||||||
|
"Registration successful, account URL: {}",
|
||||||
|
account.location
|
||||||
|
));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn do_register_account<'a>(
|
||||||
|
client: &'a mut AcmeClient,
|
||||||
|
name: &AcmeAccountName,
|
||||||
|
agree_to_tos: bool,
|
||||||
|
contact: String,
|
||||||
|
rsa_bits: Option<u32>,
|
||||||
|
) -> Result<&'a Account, Error> {
|
||||||
|
let contact = account_contact_from_string(&contact);
|
||||||
|
Ok(client
|
||||||
|
.new_account(name, agree_to_tos, contact, rsa_bits)
|
||||||
|
.await?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: { type: AcmeAccountName },
|
||||||
|
contact: {
|
||||||
|
description: "List of email addresses.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
protected: true,
|
||||||
|
)]
|
||||||
|
/// Update an ACME account.
|
||||||
|
pub fn update_account(
|
||||||
|
name: AcmeAccountName,
|
||||||
|
// Todo: email & email-list schema
|
||||||
|
contact: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
WorkerTask::spawn(
|
||||||
|
"acme-update",
|
||||||
|
Some(name.to_string()),
|
||||||
|
auth_id,
|
||||||
|
true,
|
||||||
|
move |_worker| async move {
|
||||||
|
let data = match contact {
|
||||||
|
Some(data) => json!({
|
||||||
|
"contact": account_contact_from_string(&data),
|
||||||
|
}),
|
||||||
|
None => json!({}),
|
||||||
|
};
|
||||||
|
|
||||||
|
AcmeClient::load(&name).await?.update_account(&data).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: { type: AcmeAccountName },
|
||||||
|
force: {
|
||||||
|
description:
|
||||||
|
"Delete account data even if the server refuses to deactivate the account.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
|
||||||
|
},
|
||||||
|
protected: true,
|
||||||
|
)]
|
||||||
|
/// Deactivate an ACME account.
|
||||||
|
pub fn deactivate_account(
|
||||||
|
name: AcmeAccountName,
|
||||||
|
force: bool,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
WorkerTask::spawn(
|
||||||
|
"acme-deactivate",
|
||||||
|
Some(name.to_string()),
|
||||||
|
auth_id,
|
||||||
|
true,
|
||||||
|
move |worker| async move {
|
||||||
|
match AcmeClient::load(&name)
|
||||||
|
.await?
|
||||||
|
.update_account(&json!({"status": "deactivated"}))
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_account) => (),
|
||||||
|
Err(err) if !force => return Err(err),
|
||||||
|
Err(err) => {
|
||||||
|
worker.warn(format!(
|
||||||
|
"error deactivating account {}, proceedeing anyway - {}",
|
||||||
|
name, err,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
crate::config::acme::mark_account_deactivated(&name)?;
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
directory: {
|
||||||
|
type: String,
|
||||||
|
description: "The ACME Directory.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
description: "The ACME Directory's ToS URL, if any.",
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get the Terms of Service URL for an ACME directory.
|
||||||
|
async fn get_tos(directory: Option<String>) -> Result<Option<String>, Error> {
|
||||||
|
let directory = directory.unwrap_or_else(|| {
|
||||||
|
crate::config::acme::DEFAULT_ACME_DIRECTORY_ENTRY
|
||||||
|
.url
|
||||||
|
.to_owned()
|
||||||
|
});
|
||||||
|
Ok(AcmeClient::new(directory)
|
||||||
|
.terms_of_service_url()
|
||||||
|
.await?
|
||||||
|
.map(str::to_owned))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "List of known ACME directories.",
|
||||||
|
type: Array,
|
||||||
|
items: { type: KnownAcmeDirectory },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get named known ACME directory endpoints.
|
||||||
|
fn get_directories() -> Result<&'static [KnownAcmeDirectory], Error> {
|
||||||
|
Ok(crate::config::acme::KNOWN_ACME_DIRECTORIES)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wrapper for efficient Arc use when returning the ACME challenge-plugin schema for serializing
|
||||||
|
struct ChallengeSchemaWrapper {
|
||||||
|
inner: Arc<Vec<AcmeChallengeSchema>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for ChallengeSchemaWrapper {
|
||||||
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
self.inner.serialize(serializer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_cached_challenge_schemas() -> Result<ChallengeSchemaWrapper, Error> {
|
||||||
|
lazy_static! {
|
||||||
|
static ref CACHE: Mutex<Option<(Arc<Vec<AcmeChallengeSchema>>, SystemTime)>> =
|
||||||
|
Mutex::new(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actual loading code
|
||||||
|
let mut last = CACHE.lock().unwrap();
|
||||||
|
|
||||||
|
let actual_mtime = fs::metadata(crate::config::acme::ACME_DNS_SCHEMA_FN)?.modified()?;
|
||||||
|
|
||||||
|
let schema = match &*last {
|
||||||
|
Some((schema, cached_mtime)) if *cached_mtime >= actual_mtime => schema.clone(),
|
||||||
|
_ => {
|
||||||
|
let new_schema = Arc::new(crate::config::acme::load_dns_challenge_schema()?);
|
||||||
|
*last = Some((Arc::clone(&new_schema), actual_mtime));
|
||||||
|
new_schema
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(ChallengeSchemaWrapper { inner: schema })
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "ACME Challenge Plugin Shema.",
|
||||||
|
type: Array,
|
||||||
|
items: { type: AcmeChallengeSchema },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get named known ACME directory endpoints.
|
||||||
|
fn get_challenge_schema() -> Result<ChallengeSchemaWrapper, Error> {
|
||||||
|
get_cached_challenge_schemas()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api]
|
||||||
|
#[derive(Default, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// The API's format is inherited from PVE/PMG:
|
||||||
|
pub struct PluginConfig {
|
||||||
|
/// Plugin ID.
|
||||||
|
plugin: String,
|
||||||
|
|
||||||
|
/// Plugin type.
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
ty: String,
|
||||||
|
|
||||||
|
/// DNS Api name.
|
||||||
|
api: Option<String>,
|
||||||
|
|
||||||
|
/// Plugin configuration data.
|
||||||
|
data: Option<String>,
|
||||||
|
|
||||||
|
/// Extra delay in seconds to wait before requesting validation.
|
||||||
|
///
|
||||||
|
/// Allows to cope with long TTL of DNS records.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
validation_delay: Option<u32>,
|
||||||
|
|
||||||
|
/// Flag to disable the config.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||||
|
disable: Option<bool>,
|
||||||
|
}

// See PMG/PVE's $modify_cfg_for_api sub
fn modify_cfg_for_api(id: &str, ty: &str, data: &Value) -> PluginConfig {
    let mut entry = data.clone();

    let obj = entry.as_object_mut().unwrap();
    obj.remove("id");
    obj.insert("plugin".to_string(), Value::String(id.to_owned()));
    obj.insert("type".to_string(), Value::String(ty.to_owned()));

    // FIXME: This needs to go once the `Updater` is fixed.
    // None of these should be able to fail unless the user changed the files by hand, in which
    // case we leave the unmodified string in the Value for now. This will be handled with an error
    // later.
    if let Some(Value::String(ref mut data)) = obj.get_mut("data") {
        if let Ok(new) = base64::decode_config(&data, base64::URL_SAFE_NO_PAD) {
            if let Ok(utf8) = String::from_utf8(new) {
                *data = utf8;
            }
        }
    }

    // PVE/PMG do this explicitly for ACME plugins...
    // obj.insert("digest".to_string(), Value::String(digest.clone()));

    serde_json::from_value(entry).unwrap_or_else(|_| PluginConfig {
        plugin: "*Error*".to_string(),
        ty: "*Error*".to_string(),
        ..Default::default()
    })
}
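
On disk, plugin data is kept base64url-encoded without padding, which is why the decode above uses `URL_SAFE_NO_PAD`; the add and update endpoints below, by contrast, accept standard padded base64. A quick round-trip sketch using the `base64` crate's `*_config` API as seen in this file (the payload value is made up):

use anyhow::Error;

fn roundtrip() -> Result<(), Error> {
    let raw = b"ACMEDNS_BASE_URL=https://auth.example.com";
    // stored representation: URL-safe alphabet, no '=' padding
    let stored = base64::encode_config(raw, base64::URL_SAFE_NO_PAD);
    let decoded = base64::decode_config(&stored, base64::URL_SAFE_NO_PAD)?;
    assert_eq!(&decoded[..], &raw[..]);
    Ok(())
}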

#[api(
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
    returns: {
        type: Array,
        description: "List of ACME plugin configurations.",
        items: { type: PluginConfig },
    },
)]
/// List ACME challenge plugins.
pub fn list_plugins(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>, Error> {
    use crate::config::acme::plugin;

    let (plugins, digest) = plugin::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(plugins
        .iter()
        .map(|(id, (ty, data))| modify_cfg_for_api(&id, &ty, data))
        .collect())
}

#[api(
    input: {
        properties: {
            id: { schema: PLUGIN_ID_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
    returns: { type: PluginConfig },
)]
/// Get an ACME challenge plugin configuration.
pub fn get_plugin(id: String, mut rpcenv: &mut dyn RpcEnvironment) -> Result<PluginConfig, Error> {
    use crate::config::acme::plugin;

    let (plugins, digest) = plugin::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    match plugins.get(&id) {
        Some((ty, data)) => Ok(modify_cfg_for_api(&id, &ty, &data)),
        None => http_bail!(NOT_FOUND, "no such plugin"),
    }
}

// Currently we only have "the" standalone plugin and DNS plugins so we can just flatten a
// DnsPluginUpdater:
//
// FIXME: The 'id' parameter should not be "optional" in the schema.
#[api(
    input: {
        properties: {
            type: {
                type: String,
                description: "The ACME challenge plugin type.",
            },
            core: {
                type: DnsPluginCoreUpdater,
                flatten: true,
            },
            data: {
                type: String,
                // This is different in the API!
                description: "DNS plugin data (base64 encoded with padding).",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Add ACME plugin configuration.
pub fn add_plugin(r#type: String, core: DnsPluginCoreUpdater, data: String) -> Result<(), Error> {
    use crate::config::acme::plugin;

    // Currently we only support DNS plugins and the standalone plugin is "fixed":
    if r#type != "dns" {
        bail!("invalid ACME plugin type: {:?}", r#type);
    }

    let data = String::from_utf8(base64::decode(&data)?)
        .map_err(|_| format_err!("data must be valid UTF-8"))?;
    //core.api_fixup()?;

    // FIXME: Solve the Updater with non-optional fields thing...
    let id = core
        .id
        .clone()
        .ok_or_else(|| format_err!("missing required 'id' parameter"))?;

    let _lock = plugin::lock()?;

    let (mut plugins, _digest) = plugin::config()?;
    if plugins.contains_key(&id) {
        bail!("ACME plugin ID {:?} already exists", id);
    }

    let plugin = serde_json::to_value(DnsPlugin {
        core: DnsPluginCore::try_build_from(core)?,
        data,
    })?;

    plugins.insert(id, r#type, plugin);

    plugin::save_config(&plugins)?;

    Ok(())
}
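
Note the asymmetry the schema comment points out: `data` arrives as standard padded base64 and must decode to valid UTF-8 before it is stored. A hypothetical direct call, assuming `DnsPluginCoreUpdater` derives `Default` (real clients reach this through the REST router):

fn example() -> Result<(), anyhow::Error> {
    let core = DnsPluginCoreUpdater {
        id: Some("example-dns".to_string()),
        ..Default::default() // assumption: Default is derived; illustrative only
    };
    add_plugin("dns".to_string(), core, base64::encode(b"ACMEDNS_BASE_URL=..."))?;
    Ok(())
}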

#[api(
    input: {
        properties: {
            id: { schema: PLUGIN_ID_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Delete an ACME plugin configuration.
pub fn delete_plugin(id: String) -> Result<(), Error> {
    use crate::config::acme::plugin;

    let _lock = plugin::lock()?;

    let (mut plugins, _digest) = plugin::config()?;
    if plugins.remove(&id).is_none() {
        http_bail!(NOT_FOUND, "no such plugin");
    }
    plugin::save_config(&plugins)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            core_update: {
                type: DnsPluginCoreUpdater,
                flatten: true,
            },
            data: {
                type: String,
                optional: true,
                // This is different in the API!
                description: "DNS plugin data (base64 encoded with padding).",
            },
            digest: {
                description: "Digest to protect against concurrent updates",
                optional: true,
            },
            delete: {
                description: "Options to remove from the configuration",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Update an ACME plugin configuration.
pub fn update_plugin(
    core_update: DnsPluginCoreUpdater,
    data: Option<String>,
    delete: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {
    use crate::config::acme::plugin;

    let data = data
        .as_deref()
        .map(base64::decode)
        .transpose()?
        .map(String::from_utf8)
        .transpose()
        .map_err(|_| format_err!("data must be valid UTF-8"))?;
    //core_update.api_fixup()?;

    // unwrap: the id is matched by this method's API path
    let id = core_update.id.clone().unwrap();

    let delete: Vec<&str> = delete
        .as_deref()
        .unwrap_or("")
        .split(&[' ', ',', ';', '\0'][..])
        .collect();

    let _lock = plugin::lock()?;

    let (mut plugins, expected_digest) = plugin::config()?;

    if let Some(digest) = digest {
        let digest = proxmox::tools::hex_to_digest(&digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match plugins.get_mut(&id) {
        Some((ty, ref mut entry)) => {
            if ty != "dns" {
                bail!("cannot update plugin of type {:?}", ty);
            }

            let mut plugin: DnsPlugin = serde_json::from_value(entry.clone())?;
            plugin.core.update_from(core_update, &delete)?;
            if let Some(data) = data {
                plugin.data = data;
            }
            *entry = serde_json::to_value(plugin)?;
        }
        None => http_bail!(NOT_FOUND, "no such plugin"),
    }

    plugin::save_config(&plugins)?;

    Ok(())
}
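
The `delete` parameter arrives as a single property string; splitting on space, comma, semicolon, or NUL yields the option names handed to the updater. A tiny sketch of that parsing:

fn main() {
    let delete = "validation-delay,disable";
    let props: Vec<&str> = delete.split(&[' ', ',', ';', '\0'][..]).collect();
    assert_eq!(props, ["validation-delay", "disable"]);
}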

@@ -27,7 +27,7 @@ use crate::{
         SLOT_ARRAY_SCHEMA,
         EXPORT_SLOT_LIST_SCHEMA,
         ScsiTapeChanger,
-        LinuxTapeDrive,
+        LtoTapeDrive,
     },
     tape::{
         linux_tape_changer_list,

@@ -303,7 +303,7 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
         None => bail!("Delete changer '{}' failed - no such entry", name),
     }

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
     for drive in drive_list {
         if let Some(changer) = drive.changer {
             if changer == name {
@@ -5,15 +5,15 @@ use serde_json::Value;
 use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, Router, RpcEnvironment, Permission};
+use proxmox::api::section_config::SectionConfigData;
 use proxmox::api::schema::parse_property_string;
-use proxmox::tools::fs::open_file_locked;

 use crate::api2::types::*;
 use crate::backup::*;
 use crate::config::cached_user_info::CachedUserInfo;
 use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
 use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
-use crate::server::jobstate;
+use crate::server::{jobstate, WorkerTask};

 #[api(
     input: {
@@ -50,6 +50,26 @@ pub fn list_datastores(
     Ok(list.into_iter().filter(filter_by_privs).collect())
 }

+pub(crate) fn do_create_datastore(
+    _lock: std::fs::File,
+    mut config: SectionConfigData,
+    datastore: DataStoreConfig,
+    worker: Option<&dyn crate::task::TaskState>,
+) -> Result<(), Error> {
+    let path: PathBuf = datastore.path.clone().into();
+
+    let backup_user = crate::backup::backup_user()?;
+    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid, worker)?;
+
+    config.set_data(&datastore.name, "datastore", &datastore)?;
+
+    datastore::save_config(&config)?;
+
+    jobstate::create_state_file("prune", &datastore.name)?;
+    jobstate::create_state_file("garbage_collection", &datastore.name)?;
+
+    Ok(())
+}
+
 // fixme: impl. const fn get_object_schema(datastore::DataStoreConfig::API_SCHEMA),
 // but this need support for match inside const fn
@@ -116,31 +136,30 @@ pub fn list_datastores(
     },
 )]
 /// Create new datastore config.
-pub fn create_datastore(param: Value) -> Result<(), Error> {
+pub fn create_datastore(
+    param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {

-    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+    let lock = datastore::lock_config()?;

     let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

-    let (mut config, _digest) = datastore::config()?;
+    let (config, _digest) = datastore::config()?;

     if config.sections.get(&datastore.name).is_some() {
         bail!("datastore '{}' already exists.", datastore.name);
     }

-    let path: PathBuf = datastore.path.clone().into();
-
-    let backup_user = crate::backup::backup_user()?;
-    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid)?;
-
-    config.set_data(&datastore.name, "datastore", &datastore)?;
-
-    datastore::save_config(&config)?;
-
-    jobstate::create_state_file("prune", &datastore.name)?;
-    jobstate::create_state_file("garbage_collection", &datastore.name)?;
-
-    Ok(())
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+    WorkerTask::new_thread(
+        "create-datastore",
+        Some(datastore.name.to_string()),
+        auth_id,
+        false,
+        move |worker| do_create_datastore(lock, config, datastore, Some(&worker)),
+    )
 }

 #[api(
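With this change the API call no longer blocks while the chunk store is created on disk: `WorkerTask::new_thread` runs `do_create_datastore` in the background (holding the config lock for the whole operation) and the returned `String` is the worker's UPID. A hedged sketch of a caller, assuming it already holds an `RpcEnvironment` (illustrative, not from the patch):

fn start_creation(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    // returns immediately; the UPID identifies the background task, which
    // clients poll via the task API until creation completes
    create_datastore(json!({ "name": "store1", "path": "/backup/store1" }), rpcenv)
}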
@@ -296,7 +315,7 @@ pub fn update_datastore(
     digest: Option<String>,
 ) -> Result<(), Error> {

-    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+    let _lock = datastore::lock_config()?;

     // pass/compare digest
     let (mut config, expected_digest) = datastore::config()?;

@@ -375,11 +394,11 @@ pub fn update_datastore(
     // we want to reset the statefiles, to avoid an immediate action in some cases
     // (e.g. going from monthly to weekly in the second week of the month)
     if gc_schedule_changed {
-        jobstate::create_state_file("garbage_collection", &name)?;
+        jobstate::update_job_last_run_time("garbage_collection", &name)?;
     }

     if prune_schedule_changed {
-        jobstate::create_state_file("prune", &name)?;
+        jobstate::update_job_last_run_time("prune", &name)?;
     }

     Ok(())

@@ -403,9 +422,9 @@ pub fn update_datastore(
     },
 )]
 /// Remove a datastore configuration.
-pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
+pub async fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {

-    let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+    let _lock = datastore::lock_config()?;

     let (mut config, expected_digest) = datastore::config()?;

@@ -425,6 +444,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
     let _ = jobstate::remove_state_file("prune", &name);
     let _ = jobstate::remove_state_file("garbage_collection", &name);

+    crate::server::notify_datastore_removed().await?;
+
     Ok(())
 }
@@ -19,12 +19,12 @@ use crate::{
         DRIVE_NAME_SCHEMA,
         CHANGER_NAME_SCHEMA,
         CHANGER_DRIVENUM_SCHEMA,
-        LINUX_DRIVE_PATH_SCHEMA,
+        LTO_DRIVE_PATH_SCHEMA,
-        LinuxTapeDrive,
+        LtoTapeDrive,
         ScsiTapeChanger,
     },
     tape::{
-        linux_tape_device_list,
+        lto_tape_device_list,
         check_drive_path,
     },
 };

@@ -37,7 +37,7 @@ use crate::{
             schema: DRIVE_NAME_SCHEMA,
         },
         path: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
         },
         changer: {
             schema: CHANGER_NAME_SCHEMA,

@@ -60,13 +60,13 @@ pub fn create_drive(param: Value) -> Result<(), Error> {

     let (mut config, _digest) = config::drive::config()?;

-    let item: LinuxTapeDrive = serde_json::from_value(param)?;
+    let item: LtoTapeDrive = serde_json::from_value(param)?;

-    let linux_drives = linux_tape_device_list();
+    let lto_drives = lto_tape_device_list();

-    check_drive_path(&linux_drives, &item.path)?;
+    check_drive_path(&lto_drives, &item.path)?;

-    let existing: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let existing: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

     for drive in existing {
         if drive.name == item.name {

@@ -77,7 +77,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
         }
     }

-    config.set_data(&item.name, "linux", &item)?;
+    config.set_data(&item.name, "lto", &item)?;

     config::drive::save_config(&config)?;

@@ -93,7 +93,7 @@ pub fn create_drive(param: Value) -> Result<(), Error> {
         },
     },
     returns: {
-        type: LinuxTapeDrive,
+        type: LtoTapeDrive,
     },
     access: {
         permission: &Permission::Privilege(&["tape", "device", "{name}"], PRIV_TAPE_AUDIT, false),

@@ -104,11 +104,11 @@ pub fn get_config(
     name: String,
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<LinuxTapeDrive, Error> {
+) -> Result<LtoTapeDrive, Error> {

     let (config, digest) = config::drive::config()?;

-    let data: LinuxTapeDrive = config.lookup("linux", &name)?;
+    let data: LtoTapeDrive = config.lookup("lto", &name)?;

     rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

@@ -123,7 +123,7 @@ pub fn get_config(
         description: "The list of configured drives (with config digest).",
         type: Array,
         items: {
-            type: LinuxTapeDrive,
+            type: LtoTapeDrive,
         },
     },
     access: {

@@ -135,13 +135,13 @@ pub fn get_config(
 pub fn list_drives(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<LinuxTapeDrive>, Error> {
+) -> Result<Vec<LtoTapeDrive>, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let (config, digest) = config::drive::config()?;

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

     let drive_list = drive_list
         .into_iter()

@@ -176,7 +176,7 @@ pub enum DeletableProperty {
             schema: DRIVE_NAME_SCHEMA,
         },
         path: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
             optional: true,
         },
         changer: {

@@ -225,7 +225,7 @@ pub fn update_drive(
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }

-    let mut data: LinuxTapeDrive = config.lookup("linux", &name)?;
+    let mut data: LtoTapeDrive = config.lookup("lto", &name)?;

     if let Some(delete) = delete {
         for delete_prop in delete {

@@ -240,8 +240,8 @@ pub fn update_drive(
     }

     if let Some(path) = path {
-        let linux_drives = linux_tape_device_list();
+        let lto_drives = lto_tape_device_list();
-        check_drive_path(&linux_drives, &path)?;
+        check_drive_path(&lto_drives, &path)?;
         data.path = path;
     }

@@ -261,7 +261,7 @@ pub fn update_drive(
         }
     }

-    config.set_data(&name, "linux", &data)?;
+    config.set_data(&name, "lto", &data)?;

     config::drive::save_config(&config)?;

@@ -290,8 +290,8 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {

     match config.sections.get(&name) {
         Some((section_type, _)) => {
-            if section_type != "linux" {
+            if section_type != "lto" {
-                bail!("Entry '{}' exists, but is not a linux tape drive", name);
+                bail!("Entry '{}' exists, but is not a lto tape drive", name);
             }
             config.sections.remove(&name);
         },
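The Linux-to-LTO rename is not just cosmetic: the section-config type tag stored in the drive configuration changes from "linux" to "lto", so every typed lookup has to use the new tag. In sketch form:

// After the rename, drive entries are stored and looked up as "lto" sections;
// a lookup with the old "linux" tag simply finds nothing.
let drives: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;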
@@ -333,6 +333,7 @@ pub fn update_sync_job(
     if let Some(remote_store) = remote_store { data.remote_store = remote_store; }
     if let Some(owner) = owner { data.owner = Some(owner); }

+    let schedule_changed = data.schedule != schedule;
     if schedule.is_some() { data.schedule = schedule; }
     if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }

@@ -344,6 +345,10 @@ pub fn update_sync_job(
     sync::save_config(&config)?;

+    if schedule_changed {
+        crate::server::jobstate::update_job_last_run_time("syncjob", &id)?;
+    }
+
     Ok(())
 }
@@ -266,6 +266,7 @@ pub fn update_tape_backup_job(
     if latest_only.is_some() { data.setup.latest_only = latest_only; }
     if notify_user.is_some() { data.setup.notify_user = notify_user; }

+    let schedule_changed = data.schedule != schedule;
     if schedule.is_some() { data.schedule = schedule; }

     if let Some(comment) = comment {

@@ -281,6 +282,10 @@ pub fn update_tape_backup_job(
     config::tape_job::save_config(&config)?;

+    if schedule_changed {
+        crate::server::jobstate::update_job_last_run_time("tape-backup-job", &id)?;
+    }
+
     Ok(())
 }
@@ -274,12 +274,17 @@ pub fn update_verification_job(

     if ignore_verified.is_some() { data.ignore_verified = ignore_verified; }
     if outdated_after.is_some() { data.outdated_after = outdated_after; }
+    let schedule_changed = data.schedule != schedule;
     if schedule.is_some() { data.schedule = schedule; }

     config.set_data(&id, "verification", &data)?;

     verify::save_config(&config)?;

+    if schedule_changed {
+        crate::server::jobstate::update_job_last_run_time("verificationjob", &id)?;
+    }
+
     Ok(())
 }
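The same guard now appears in the sync, tape-backup, and verification job updaters: `schedule_changed` is computed before the new value is applied, and the job's last-run time is only rewritten when the schedule actually changed, so unrelated edits cannot shift the next scheduled run. The core of the pattern, using the sync job as the example:

// Option<String> comparison covers set -> unset, unset -> set and value changes.
let schedule_changed = data.schedule != schedule;
if schedule.is_some() { data.schedule = schedule; }
// ... persist the config ...
if schedule_changed {
    crate::server::jobstate::update_job_last_run_time("syncjob", &id)?;
}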
@@ -48,7 +48,7 @@ pub fn list_dir_content<R: Read + Seek>(
         let mut components = path.clone();
         components.push(b'/');
         components.extend(&direntry.name);
-        let mut entry = ArchiveEntry::new(&components, &direntry.attr);
+        let mut entry = ArchiveEntry::new(&components, Some(&direntry.attr));
         if let DirEntryAttribute::File { size, mtime } = direntry.attr {
             entry.size = size.into();
             entry.mtime = mtime.into();
@@ -17,7 +17,7 @@ use proxmox::api::{
     api, schema::*, ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment,
 };
 use proxmox::list_subdirs_api_method;
-use proxmox::tools::websocket::WebSocket;
+use proxmox_http::websocket::WebSocket;
 use proxmox::{identity, sortable};

 use crate::api2::types::*;

@@ -27,6 +27,8 @@ use crate::tools;
 use crate::tools::ticket::{self, Empty, Ticket};

 pub mod apt;
+pub mod certificates;
+pub mod config;
 pub mod disks;
 pub mod dns;
 pub mod network;

@@ -314,6 +316,8 @@ fn upgrade_to_websocket(

 pub const SUBDIRS: SubdirMap = &[
     ("apt", &apt::ROUTER),
+    ("certificates", &certificates::ROUTER),
+    ("config", &config::ROUTER),
     ("disks", &disks::ROUTER),
     ("dns", &dns::ROUTER),
     ("journal", &journal::ROUTER),
@@ -5,10 +5,17 @@ use std::collections::HashMap;
 use proxmox::list_subdirs_api_method;
 use proxmox::api::{api, RpcEnvironment, RpcEnvironmentType, Permission};
 use proxmox::api::router::{Router, SubdirMap};
+use proxmox::tools::fs::{replace_file, CreateOptions};
+
+use proxmox_http::ProxyConfig;
+
+use crate::config::node;
 use crate::server::WorkerTask;
-use crate::tools::{apt, http, subscription};
+use crate::tools::{
+    apt,
+    pbs_simple_http,
+    subscription,
+};
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
@@ -46,10 +53,38 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
     Ok(json!(cache.package_status))
 }

+pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {
+
+    const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE
+
+    if let Some(proxy_config) = proxy_config {
+        let proxy = proxy_config.to_proxy_string()?;
+        let data = format!("Acquire::http::Proxy \"{}\";\n", proxy);
+        replace_file(PROXY_CFG_FN, data.as_bytes(), CreateOptions::new())
+    } else {
+        match std::fs::remove_file(PROXY_CFG_FN) {
+            Ok(()) => Ok(()),
+            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
+            Err(err) => bail!("failed to remove proxy config '{}' - {}", PROXY_CFG_FN, err),
+        }
+    }
+}
+
+fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
+    let proxy_config = if let Ok((node_config, _digest)) = node::config() {
+        node_config.http_proxy()
+    } else {
+        None
+    };
+    update_apt_proxy_config(proxy_config.as_ref())?;
+
+    Ok(proxy_config)
+}
+
 fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
     if !quiet { worker.log("starting apt-get update") }

-    // TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
+    read_and_update_proxy_config()?;

     let mut command = std::process::Command::new("apt-get");
     command.arg("update");
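`update_apt_proxy_config` mirrors the node's proxy setting into a one-line APT configuration snippet and removes the file again when no proxy is configured, tolerating an already-missing file. An illustrative sketch of the content it writes (the proxy URL is made up):

fn main() {
    // what ends up in /etc/apt/apt.conf.d/76pveproxy for a configured proxy
    let proxy = "http://proxy.example.com:3128"; // hypothetical proxy URL
    let data = format!("Acquire::http::Proxy \"{}\";\n", proxy);
    assert_eq!(data, "Acquire::http::Proxy \"http://proxy.example.com:3128\";\n");
}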
@@ -152,6 +187,7 @@ pub fn apt_update_database(
 }

 #[api(
+    protected: true,
     input: {
         properties: {
             node: {

@@ -194,10 +230,13 @@ fn apt_get_changelog(
         bail!("Package '{}' not found", name);
     }

+    let proxy_config = read_and_update_proxy_config()?;
+    let mut client = pbs_simple_http(proxy_config);
+
     let changelog_url = &pkg_info[0].change_log_url;
     // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
     if changelog_url.starts_with("http://download.proxmox.com/") {
-        let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
+        let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, None))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
         Ok(json!(changelog))

@@ -221,7 +260,7 @@ fn apt_get_changelog(
         auth_header.insert("Authorization".to_owned(),
             format!("Basic {}", base64::encode(format!("{}:{}", key, id))));

-        let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
+        let changelog = crate::tools::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
         Ok(json!(changelog))

src/api2/node/certificates.rs (new file, 579 lines)
@@ -0,0 +1,579 @@

use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};

use proxmox::api::router::SubdirMap;
use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::list_subdirs_api_method;

use crate::acme::AcmeClient;
use crate::api2::types::Authid;
use crate::api2::types::NODE_SCHEMA;
use crate::api2::types::AcmeDomain;
use crate::config::acl::PRIV_SYS_MODIFY;
use crate::config::node::NodeConfig;
use crate::server::WorkerTask;
use crate::tools::cert;

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

const SUBDIRS: SubdirMap = &[
    ("acme", &ACME_ROUTER),
    (
        "custom",
        &Router::new()
            .post(&API_METHOD_UPLOAD_CUSTOM_CERTIFICATE)
            .delete(&API_METHOD_DELETE_CUSTOM_CERTIFICATE),
    ),
    ("info", &Router::new().get(&API_METHOD_GET_INFO)),
];

const ACME_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(ACME_SUBDIRS))
    .subdirs(ACME_SUBDIRS);

const ACME_SUBDIRS: SubdirMap = &[(
    "certificate",
    &Router::new()
        .post(&API_METHOD_NEW_ACME_CERT)
        .put(&API_METHOD_RENEW_ACME_CERT),
)];

#[api(
    properties: {
        san: {
            type: Array,
            items: {
                description: "A SubjectAlternateName entry.",
                type: String,
            },
        },
    },
)]
/// Certificate information.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CertificateInfo {
    /// Certificate file name.
    #[serde(skip_serializing_if = "Option::is_none")]
    filename: Option<String>,

    /// Certificate subject name.
    subject: String,

    /// List of certificate's SubjectAlternativeName entries.
    san: Vec<String>,

    /// Certificate issuer name.
    issuer: String,

    /// Certificate's notBefore timestamp (UNIX epoch).
    #[serde(skip_serializing_if = "Option::is_none")]
    notbefore: Option<i64>,

    /// Certificate's notAfter timestamp (UNIX epoch).
    #[serde(skip_serializing_if = "Option::is_none")]
    notafter: Option<i64>,

    /// Certificate in PEM format.
    #[serde(skip_serializing_if = "Option::is_none")]
    pem: Option<String>,

    /// Certificate's public key algorithm.
    public_key_type: String,

    /// Certificate's public key size if available.
    #[serde(skip_serializing_if = "Option::is_none")]
    public_key_bits: Option<u32>,

    /// The SSL Fingerprint.
    fingerprint: Option<String>,
}
impl TryFrom<&cert::CertInfo> for CertificateInfo {
    type Error = Error;

    fn try_from(info: &cert::CertInfo) -> Result<Self, Self::Error> {
        let pubkey = info.public_key()?;

        Ok(Self {
            filename: None,
            subject: info.subject_name()?,
            san: info
                .subject_alt_names()
                .map(|san| {
                    san.into_iter()
                        // FIXME: Support `.ipaddress()`?
                        .filter_map(|name| name.dnsname().map(str::to_owned))
                        .collect()
                })
                .unwrap_or_default(),
            issuer: info.issuer_name()?,
            notbefore: info.not_before_unix().ok(),
            notafter: info.not_after_unix().ok(),
            pem: None,
            public_key_type: openssl::nid::Nid::from_raw(pubkey.id().as_raw())
                .long_name()
                .unwrap_or("<unsupported key type>")
                .to_owned(),
            public_key_bits: Some(pubkey.bits()),
            fingerprint: Some(info.fingerprint()?),
        })
    }
}
fn get_certificate_pem() -> Result<String, Error> {
    let cert_path = configdir!("/proxy.pem");
    let cert_pem = proxmox::tools::fs::file_get_contents(&cert_path)?;
    String::from_utf8(cert_pem)
        .map_err(|_| format_err!("certificate in {:?} is not a valid PEM file", cert_path))
}

// to deduplicate error messages
fn pem_to_cert_info(pem: &[u8]) -> Result<cert::CertInfo, Error> {
    cert::CertInfo::from_pem(pem)
        .map_err(|err| format_err!("error loading proxy certificate: {}", err))
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: CertificateInfo },
        description: "List of certificate infos.",
    },
)]
/// Get certificate info.
pub fn get_info() -> Result<Vec<CertificateInfo>, Error> {
    let cert_pem = get_certificate_pem()?;
    let cert = pem_to_cert_info(cert_pem.as_bytes())?;

    Ok(vec![CertificateInfo {
        filename: Some("proxy.pem".to_string()), // we only have the one
        pem: Some(cert_pem),
        ..CertificateInfo::try_from(&cert)?
    }])
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            certificates: { description: "PEM encoded certificate (chain)." },
            key: { description: "PEM encoded private key." },
            // FIXME: widget-toolkit should have an option to disable using these 2 parameters...
            restart: {
                description: "UI compatibility parameter, ignored",
                type: Boolean,
                optional: true,
                default: false,
            },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    returns: {
        type: Array,
        items: { type: CertificateInfo },
        description: "List of certificate infos.",
    },
    protected: true,
)]
/// Upload a custom certificate.
pub async fn upload_custom_certificate(
    certificates: String,
    key: String,
) -> Result<Vec<CertificateInfo>, Error> {
    let certificates = X509::stack_from_pem(certificates.as_bytes())
        .map_err(|err| format_err!("failed to decode certificate chain: {}", err))?;
    let key = PKey::private_key_from_pem(key.as_bytes())
        .map_err(|err| format_err!("failed to parse private key: {}", err))?;

    let certificates = certificates
        .into_iter()
        .try_fold(Vec::<u8>::new(), |mut stack, cert| -> Result<_, Error> {
            if !stack.is_empty() {
                stack.push(b'\n');
            }
            stack.extend(cert.to_pem()?);
            Ok(stack)
        })
        .map_err(|err| format_err!("error formatting certificate chain as PEM: {}", err))?;

    let key = key.private_key_to_pem_pkcs8()?;

    crate::config::set_proxy_certificate(&certificates, &key)?;
    crate::server::reload_proxy_certificate().await?;

    get_info()
}
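The `try_fold` in `upload_custom_certificate` re-encodes every certificate of the uploaded chain and joins the PEM blocks with single newlines, normalizing whatever formatting the client sent. The equivalent plain loop as a standalone sketch (the helper name is hypothetical):

use anyhow::Error;
use openssl::x509::X509;

fn normalize_chain(pem_chain: &str) -> Result<Vec<u8>, Error> {
    let mut out = Vec::new();
    for cert in X509::stack_from_pem(pem_chain.as_bytes())? {
        if !out.is_empty() {
            out.push(b'\n'); // separate consecutive PEM blocks
        }
        out.extend(cert.to_pem()?);
    }
    Ok(out)
}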
#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            restart: {
                description: "UI compatibility parameter, ignored",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Delete the current certificate and regenerate a self signed one.
pub async fn delete_custom_certificate() -> Result<(), Error> {
    let cert_path = configdir!("/proxy.pem");
    // Here we fail since if this fails nothing else breaks anyway
    std::fs::remove_file(&cert_path)
        .map_err(|err| format_err!("failed to unlink {:?} - {}", cert_path, err))?;

    let key_path = configdir!("/proxy.key");
    if let Err(err) = std::fs::remove_file(&key_path) {
        // Here we just log since the certificate is already gone and we'd rather try to generate
        // the self-signed certificate even if this fails:
        log::error!(
            "failed to remove certificate private key {:?} - {}",
            key_path,
            err
        );
    }

    crate::config::update_self_signed_cert(true)?;
    crate::server::reload_proxy_certificate().await?;

    Ok(())
}

struct OrderedCertificate {
    certificate: hyper::body::Bytes,
    private_key_pem: Vec<u8>,
}
async fn order_certificate(
    worker: Arc<WorkerTask>,
    node_config: &NodeConfig,
) -> Result<Option<OrderedCertificate>, Error> {
    use proxmox_acme_rs::authorization::Status;
    use proxmox_acme_rs::order::Identifier;

    let domains = node_config.acme_domains().try_fold(
        Vec::<AcmeDomain>::new(),
        |mut acc, domain| -> Result<_, Error> {
            let mut domain = domain?;
            domain.domain.make_ascii_lowercase();
            if let Some(alias) = &mut domain.alias {
                alias.make_ascii_lowercase();
            }
            acc.push(domain);
            Ok(acc)
        },
    )?;

    let get_domain_config = |domain: &str| {
        domains
            .iter()
            .find(|d| d.domain == domain)
            .ok_or_else(|| format_err!("no config for domain '{}'", domain))
    };

    if domains.is_empty() {
        worker.log("No domains configured to be ordered from an ACME server.");
        return Ok(None);
    }

    let (plugins, _) = crate::config::acme::plugin::config()?;

    let mut acme = node_config.acme_client().await?;

    worker.log("Placing ACME order");
    let order = acme
        .new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase()))
        .await?;
    worker.log(format!("Order URL: {}", order.location));

    let identifiers: Vec<String> = order
        .data
        .identifiers
        .iter()
        .map(|identifier| match identifier {
            Identifier::Dns(domain) => domain.clone(),
        })
        .collect();

    for auth_url in &order.data.authorizations {
        worker.log(format!("Getting authorization details from '{}'", auth_url));
        let mut auth = acme.get_authorization(&auth_url).await?;

        let domain = match &mut auth.identifier {
            Identifier::Dns(domain) => domain.to_ascii_lowercase(),
        };

        if auth.status == Status::Valid {
            worker.log(format!("{} is already validated!", domain));
            continue;
        }

        worker.log(format!("The validation for {} is pending", domain));
        let domain_config: &AcmeDomain = get_domain_config(&domain)?;
        let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone");
        let mut plugin_cfg =
            crate::acme::get_acme_plugin(&plugins, plugin_id)?.ok_or_else(|| {
                format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain)
            })?;

        worker.log("Setting up validation plugin");
        let validation_url = plugin_cfg
            .setup(&mut acme, &auth, domain_config, Arc::clone(&worker))
            .await?;

        let result = request_validation(&worker, &mut acme, auth_url, validation_url).await;

        if let Err(err) = plugin_cfg
            .teardown(&mut acme, &auth, domain_config, Arc::clone(&worker))
            .await
        {
            worker.warn(format!(
                "Failed to teardown plugin '{}' for domain '{}' - {}",
                plugin_id, domain, err
            ));
        }

        let _: () = result?;
    }

    worker.log("All domains validated");
    worker.log("Creating CSR");

    let csr = proxmox_acme_rs::util::Csr::generate(&identifiers, &Default::default())?;
    let mut finalize_error_cnt = 0u8;
    let order_url = &order.location;
    let mut order;
    loop {
        use proxmox_acme_rs::order::Status;

        order = acme.get_order(order_url).await?;

        match order.status {
            Status::Pending => {
                worker.log("still pending, trying to finalize anyway");
                let finalize = order
                    .finalize
                    .as_deref()
                    .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
                if let Err(err) = acme.finalize(finalize, &csr.data).await {
                    if finalize_error_cnt >= 5 {
                        return Err(err.into());
                    }

                    finalize_error_cnt += 1;
                }
                tokio::time::sleep(Duration::from_secs(5)).await;
            }
            Status::Ready => {
                worker.log("order is ready, finalizing");
                let finalize = order
                    .finalize
                    .as_deref()
                    .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
                acme.finalize(finalize, &csr.data).await?;
                tokio::time::sleep(Duration::from_secs(5)).await;
            }
            Status::Processing => {
                worker.log("still processing, trying again in 30 seconds");
                tokio::time::sleep(Duration::from_secs(30)).await;
            }
            Status::Valid => {
                worker.log("valid");
                break;
            }
            other => bail!("order status: {:?}", other),
        }
    }

    worker.log("Downloading certificate");
    let certificate = acme
        .get_certificate(
            order
                .certificate
                .as_deref()
                .ok_or_else(|| format_err!("missing certificate url in finalized order"))?,
        )
        .await?;

    Ok(Some(OrderedCertificate {
        certificate,
        private_key_pem: csr.private_key_pem,
    }))
}

async fn request_validation(
    worker: &WorkerTask,
    acme: &mut AcmeClient,
    auth_url: &str,
    validation_url: &str,
) -> Result<(), Error> {
    worker.log("Triggering validation");
    acme.request_challenge_validation(&validation_url).await?;

    worker.log("Sleeping for 5 seconds");
    tokio::time::sleep(Duration::from_secs(5)).await;

    loop {
        use proxmox_acme_rs::authorization::Status;

        let auth = acme.get_authorization(&auth_url).await?;
        match auth.status {
            Status::Pending => {
                worker.log("Status is still 'pending', trying again in 10 seconds");
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
            Status::Valid => return Ok(()),
            other => bail!(
                "validating challenge '{}' failed - status: {:?}",
                validation_url,
                other
            ),
        }
    }
}
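The finalize loop in `order_certificate` is effectively a small state machine over the ACME order status, tolerating up to five failed finalize attempts while the order is still pending. Summarized to match the match arms above:

// Pending    -> attempt finalize (up to 5 failures tolerated), sleep 5s, re-poll
// Ready      -> finalize, sleep 5s, re-poll
// Processing -> sleep 30s, re-poll
// Valid      -> break and download the certificate
// other      -> bail out with the unexpected status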
#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Order a new ACME certificate.
pub fn new_acme_cert(force: bool, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    spawn_certificate_worker("acme-new-cert", force, rpcenv)
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            force: {
                description: "Force replacement of existing files.",
                type: Boolean,
                optional: true,
                default: false,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Renew the current ACME certificate if it expires within 30 days (or always if the `force`
/// parameter is set).
pub fn renew_acme_cert(force: bool, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    if !cert_expires_soon()? && !force {
        bail!("Certificate does not expire within the next 30 days and 'force' is not set.")
    }

    spawn_certificate_worker("acme-renew-cert", force, rpcenv)
}

/// Check whether the current certificate expires within the next 30 days.
pub fn cert_expires_soon() -> Result<bool, Error> {
    let cert = pem_to_cert_info(get_certificate_pem()?.as_bytes())?;
    cert.is_expired_after_epoch(proxmox::tools::time::epoch_i64() + 30 * 24 * 60 * 60)
        .map_err(|err| format_err!("Failed to check certificate expiration date: {}", err))
}
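The renewal threshold is written inline as `30 * 24 * 60 * 60`; spelled out:

// 30 days in seconds, the window used by cert_expires_soon():
const THIRTY_DAYS_SECS: i64 = 30 * 24 * 60 * 60; // = 2_592_000
// a certificate "expires soon" iff notAfter <= now + THIRTY_DAYS_SECS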
fn spawn_certificate_worker(
    name: &'static str,
    force: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    // We only have 1 certificate path in PBS which makes figuring out whether or not it is a
    // custom one too hard... We keep the parameter because the widget-toolkit may be using it...
    let _ = force;

    let (node_config, _digest) = crate::config::node::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    WorkerTask::spawn(name, None, auth_id, true, move |worker| async move {
        if let Some(cert) = order_certificate(worker, &node_config).await? {
            crate::config::set_proxy_certificate(&cert.certificate, &cert.private_key_pem)?;
            crate::server::reload_proxy_certificate().await?;
        }
        Ok(())
    })
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system", "certificates"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Revoke the current ACME certificate and regenerate a self-signed one.
pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    let (node_config, _digest) = crate::config::node::config()?;

    let cert_pem = get_certificate_pem()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    WorkerTask::spawn(
        "acme-revoke-cert",
        None,
        auth_id,
        true,
        move |worker| async move {
            worker.log("Loading ACME account");
            let mut acme = node_config.acme_client().await?;
            worker.log("Revoking old certificate");
            acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
            worker.log("Deleting certificate and regenerating a self-signed one");
            delete_custom_certificate().await?;
            Ok(())
        },
    )
}
87
src/api2/node/config.rs
Normal file
@ -0,0 +1,87 @@
use anyhow::Error;

use proxmox::api::schema::Updatable;
use proxmox::api::{api, Permission, Router, RpcEnvironment};

use crate::api2::types::NODE_SCHEMA;
use crate::api2::node::apt::update_apt_proxy_config;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
use crate::config::node::{NodeConfig, NodeConfigUpdater};

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_NODE_CONFIG)
    .put(&API_METHOD_UPDATE_NODE_CONFIG);

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_AUDIT, false),
    },
    returns: {
        type: NodeConfig,
    },
)]
/// Get the node configuration
pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig, Error> {
    let (config, digest) = crate::config::node::config()?;
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(config)
}

#[api(
    input: {
        properties: {
            node: { schema: NODE_SCHEMA },
            digest: {
                description: "Digest to protect against concurrent updates",
                optional: true,
            },
            updater: {
                type: NodeConfigUpdater,
                flatten: true,
            },
            delete: {
                description: "Options to remove from the configuration",
                optional: true,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["system"], PRIV_SYS_MODIFY, false),
    },
    protected: true,
)]
/// Update the node configuration
pub fn update_node_config(
    updater: NodeConfigUpdater,
    delete: Option<String>,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = crate::config::node::lock()?;
    let (mut config, expected_digest) = crate::config::node::config()?;
    if let Some(digest) = digest {
        // FIXME: GUI doesn't handle our non-inlined digest part here properly...
        if !digest.is_empty() {
            let digest = proxmox::tools::hex_to_digest(&digest)?;
            crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
        }
    }

    let delete: Vec<&str> = delete
        .as_deref()
        .unwrap_or("")
        .split(&[' ', ',', ';', '\0'][..])
        .collect();

    config.update_from(updater, &delete)?;

    crate::config::node::save_config(&config)?;

    update_apt_proxy_config(config.http_proxy().as_ref())?;

    Ok(())
}
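The update endpoint above guards against concurrent edits by comparing the digest the client read (via get_node_config) with the digest of the file it is about to overwrite, all while holding the config lock. A minimal, self-contained sketch of that optimistic-locking pattern; std's DefaultHasher here is only a stand-in for the SHA-256 digest the real code computes:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the SHA-256 config digest used by the real API.
fn digest(config: &str) -> u64 {
    let mut h = DefaultHasher::new();
    config.hash(&mut h);
    h.finish()
}

fn update_config(stored: &mut String, new_value: &str, expected: Option<u64>) -> Result<(), String> {
    if let Some(expected) = expected {
        // Refuse the update if someone else modified the config since the client read it.
        if digest(stored) != expected {
            return Err("detected modified configuration - please try again".into());
        }
    }
    *stored = new_value.to_string();
    Ok(())
}

fn main() {
    let mut config = String::from("http-proxy: http://proxy:3128");
    let seen = digest(&config);

    // a concurrent writer sneaks in...
    config = String::from("http-proxy: http://other:8080");

    // ...so an update based on the now-stale digest is rejected
    assert!(update_config(&mut config, "acme: default", Some(seen)).is_err());
}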
@@ -5,6 +5,7 @@ use ::serde::{Deserialize, Serialize};
 use proxmox::api::{api, Permission, RpcEnvironment, RpcEnvironmentType};
 use proxmox::api::section_config::SectionConfigData;
 use proxmox::api::router::Router;
+use proxmox::tools::fs::open_file_locked;

 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
 use crate::tools::disks::{
@@ -16,7 +17,7 @@ use crate::tools::systemd::{self, types::*};
 use crate::server::WorkerTask;

 use crate::api2::types::*;
-use crate::config::datastore::DataStoreConfig;
+use crate::config::datastore::{self, DataStoreConfig};

 #[api(
     properties: {
@@ -179,7 +180,17 @@ pub fn create_datastore_disk(
     systemd::start_unit(&mount_unit_name)?;

     if add_datastore {
-        crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+        let lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
+        let datastore: DataStoreConfig =
+            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;
+
+        let (config, _digest) = datastore::config()?;
+
+        if config.sections.get(&datastore.name).is_some() {
+            bail!("datastore '{}' already exists.", datastore.name);
+        }
+
+        crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
     }

     Ok(())
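The key point of this change is that the existence check and the creation now happen under one held config lock, so the read-check-write sequence cannot race with a concurrent API call. The same pattern in a self-contained form, with a Mutex-guarded map standing in for the locked datastore.cfg:

use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in for the on-disk datastore.cfg plus its lock file.
struct Config {
    sections: Mutex<HashMap<String, String>>,
}

impl Config {
    // Check-then-insert while holding the lock, so two concurrent
    // callers can never both pass the existence check.
    fn create_datastore(&self, name: &str, path: &str) -> Result<(), String> {
        let mut sections = self.sections.lock().unwrap();
        if sections.contains_key(name) {
            return Err(format!("datastore '{}' already exists.", name));
        }
        sections.insert(name.to_string(), path.to_string());
        Ok(())
    }
}

fn main() {
    let config = Config { sections: Mutex::new(HashMap::new()) };
    assert!(config.create_datastore("store1", "/mnt/datastore/store1").is_ok());
    assert!(config.create_datastore("store1", "/mnt/datastore/other").is_err());
}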
@@ -20,6 +20,7 @@ use crate::tools::disks::{
     zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
     DiskUsageType,
 };
+use crate::config::datastore::{self, DataStoreConfig};

 use crate::server::WorkerTask;

@@ -372,7 +373,17 @@ pub fn create_zpool(
     }

     if add_datastore {
-        crate::api2::config::datastore::create_datastore(json!({ "name": name, "path": mount_point }))?
+        let lock = datastore::lock_config()?;
+        let datastore: DataStoreConfig =
+            serde_json::from_value(json!({ "name": name, "path": mount_point }))?;
+
+        let (config, _digest) = datastore::config()?;
+
+        if config.sections.get(&datastore.name).is_some() {
+            bail!("datastore '{}' already exists.", datastore.name);
+        }
+
+        crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
     }

     Ok(())
@@ -60,36 +60,41 @@ use crate::config::acl::PRIV_SYS_AUDIT;
 )]
 /// Read syslog entries.
 fn get_journal(
-    param: Value,
+    since: Option<i64>,
+    until: Option<i64>,
+    lastentries: Option<u64>,
+    startcursor: Option<String>,
+    endcursor: Option<String>,
+    _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let mut args = vec![];

-    if let Some(lastentries) = param["lastentries"].as_u64() {
+    if let Some(lastentries) = lastentries {
         args.push(String::from("-n"));
         args.push(format!("{}", lastentries));
     }

-    if let Some(since) = param["since"].as_str() {
+    if let Some(since) = since {
         args.push(String::from("-b"));
-        args.push(since.to_owned());
+        args.push(since.to_string());
     }

-    if let Some(until) = param["until"].as_str() {
+    if let Some(until) = until {
         args.push(String::from("-e"));
-        args.push(until.to_owned());
+        args.push(until.to_string());
     }

-    if let Some(startcursor) = param["startcursor"].as_str() {
+    if let Some(startcursor) = startcursor {
         args.push(String::from("-f"));
-        args.push(startcursor.to_owned());
+        args.push(startcursor);
     }

-    if let Some(endcursor) = param["endcursor"].as_str() {
+    if let Some(endcursor) = endcursor {
         args.push(String::from("-t"));
-        args.push(endcursor.to_owned());
+        args.push(endcursor);
     }

     let mut lines: Vec<String> = vec![];
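With typed optional parameters, the api macro now does the parsing and validation that was previously hand-rolled through `param[...]`. The argument assembly itself is plain Rust and easy to test in isolation; a runnable sketch (the flag names are taken from the hunk above, where they are handed to a journal-reader helper):

fn journal_args(
    since: Option<i64>,
    until: Option<i64>,
    lastentries: Option<u64>,
    startcursor: Option<String>,
    endcursor: Option<String>,
) -> Vec<String> {
    let mut args = vec![];
    if let Some(n) = lastentries {
        args.push("-n".to_string());
        args.push(n.to_string());
    }
    if let Some(since) = since {
        args.push("-b".to_string());
        args.push(since.to_string());
    }
    if let Some(until) = until {
        args.push("-e".to_string());
        args.push(until.to_string());
    }
    if let Some(cursor) = startcursor {
        args.push("-f".to_string());
        args.push(cursor);
    }
    if let Some(cursor) = endcursor {
        args.push("-t".to_string());
        args.push(cursor);
    }
    args
}

fn main() {
    // only the options the client actually sent end up as flags
    assert_eq!(
        journal_args(Some(1_613_000_000), None, Some(100), None, None),
        vec!["-n", "100", "-b", "1613000000"]
    );
}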
@@ -2,7 +2,7 @@ use std::process::Command;
 use std::path::Path;

 use anyhow::{Error, format_err, bail};
-use serde_json::{json, Value};
+use serde_json::Value;

 use proxmox::sys::linux::procfs;

@@ -12,6 +12,16 @@ use crate::api2::types::*;
 use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
 use crate::tools::cert::CertInfo;

+impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
+    fn from(info: procfs::ProcFsCPUInfo) -> Self {
+        Self {
+            model: info.model,
+            sockets: info.sockets,
+            cpus: info.cpus,
+        }
+    }
+}
+
 #[api(
     input: {
         properties: {
@@ -21,43 +31,7 @@ use crate::tools::cert::CertInfo;
         },
     },
     returns: {
-        type: Object,
-        description: "Returns node memory, CPU and (root) disk usage",
-        properties: {
-            memory: {
-                type: Object,
-                description: "node memory usage counters",
-                properties: {
-                    total: {
-                        description: "total memory",
-                        type: Integer,
-                    },
-                    used: {
-                        description: "total memory",
-                        type: Integer,
-                    },
-                    free: {
-                        description: "free memory",
-                        type: Integer,
-                    },
-                },
-            },
-            cpu: {
-                type: Number,
-                description: "Total CPU usage since last query.",
-                optional: true,
-            },
-            info: {
-                type: Object,
-                description: "contains node information",
-                properties: {
-                    fingerprint: {
-                        description: "The SSL Fingerprint",
-                        type: String,
-                    },
-                },
-            },
-        },
+        type: NodeStatus,
     },
     access: {
         permission: &Permission::Privilege(&["system", "status"], PRIV_SYS_AUDIT, false),
@@ -68,32 +42,52 @@ fn get_status(
     _param: Value,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+) -> Result<NodeStatus, Error> {

     let meminfo: procfs::ProcFsMemInfo = procfs::read_meminfo()?;
+    let memory = NodeMemoryCounters {
+        total: meminfo.memtotal,
+        used: meminfo.memused,
+        free: meminfo.memfree,
+    };
+
+    let swap = NodeSwapCounters {
+        total: meminfo.swaptotal,
+        used: meminfo.swapused,
+        free: meminfo.swapfree,
+    };
+
     let kstat: procfs::ProcFsStat = procfs::read_proc_stat()?;
-    let disk_usage = crate::tools::disks::disk_usage(Path::new("/"))?;
+    let cpu = kstat.cpu;
+    let wait = kstat.iowait_percent;

-    // get fingerprint
-    let cert = CertInfo::new()?;
-    let fp = cert.fingerprint()?;
+    let loadavg = procfs::Loadavg::read()?;
+    let loadavg = [loadavg.one(), loadavg.five(), loadavg.fifteen()];
+
+    let cpuinfo = procfs::read_cpuinfo()?;
+    let cpuinfo = cpuinfo.into();
+
+    let uname = nix::sys::utsname::uname();
+    let kversion = format!(
+        "{} {} {}",
+        uname.sysname(),
+        uname.release(),
+        uname.version()
+    );

-    Ok(json!({
-        "memory": {
-            "total": meminfo.memtotal,
-            "used": meminfo.memused,
-            "free": meminfo.memfree,
-        },
-        "cpu": kstat.cpu,
-        "root": {
-            "total": disk_usage.total,
-            "used": disk_usage.used,
-            "free": disk_usage.avail,
-        },
-        "info": {
-            "fingerprint": fp,
-        },
-    }))
+    Ok(NodeStatus {
+        memory,
+        swap,
+        root: crate::tools::disks::disk_usage(Path::new("/"))?,
+        uptime: procfs::read_proc_uptime()?.0 as u64,
+        loadavg,
+        kversion,
+        cpuinfo,
+        cpu,
+        wait,
+        info: NodeInformation {
+            fingerprint: CertInfo::new()?.fingerprint()?,
+        },
+    })
 }

 #[api(
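Returning a typed struct instead of an untyped json! blob means the response schema is derived from the struct definition and the compiler checks every field. A minimal sketch of the idea using serde and serde_json (the field set here is an assumption modeled on the hunk above, not the actual NodeStatus definition):

use serde::Serialize;

#[derive(Serialize)]
struct MemoryCounters {
    total: u64,
    used: u64,
    free: u64,
}

#[derive(Serialize)]
struct Status {
    memory: MemoryCounters,
    // CPU usage as a fraction, 0.0 - 1.0
    cpu: f64,
    // one, five and fifteen minute load average
    loadavg: [f64; 3],
}

fn main() {
    let status = Status {
        memory: MemoryCounters { total: 16 << 30, used: 4 << 30, free: 12 << 30 },
        cpu: 0.07,
        loadavg: [0.5, 0.4, 0.3],
    };
    // Serializes to the same shape a hand-built json!({...}) would produce,
    // but with the structure checked at compile time.
    println!("{}", serde_json::to_string_pretty(&status).unwrap());
}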
@@ -32,9 +32,6 @@ use crate::api2::types::{NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid};
 pub fn check_subscription(
     force: bool,
 ) -> Result<(), Error> {
-    // FIXME: drop once proxmox-api-macro is bumped to >> 5.0.0-1
-    let _remove_me = API_METHOD_CHECK_SUBSCRIPTION_PARAM_DEFAULT_FORCE;
-
     let info = match subscription::read_subscription() {
         Err(err) => bail!("could not read subscription status: {}", err),
         Ok(Some(info)) => info,
@@ -256,7 +256,7 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
         },
     },
     access: {
-        description: "Users can access there own tasks, or need Sys.Audit on /system/tasks.",
+        description: "Users can access their own tasks, or need Sys.Audit on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -326,7 +326,7 @@ async fn read_task_log(
         },
     },
     access: {
-        description: "Users can stop there own tasks, or need Sys.Modify on /system/tasks.",
+        description: "Users can stop their own tasks, or need Sys.Modify on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -420,7 +420,7 @@ fn stop_task(
         items: { type: TaskListItem },
     },
    access: {
-        description: "Users can only see there own tasks, unless the have Sys.Audit on /system/tasks.",
+        description: "Users can only see their own tasks, unless they have Sys.Audit on /system/tasks.",
         permission: &Permission::Anybody,
     },
 )]
@@ -21,7 +21,7 @@ use crate::api2::types::{
     Authid,
 };

-use crate::backup::{DataStore};
+use crate::backup::DataStore;
 use crate::config::datastore;
 use crate::tools::statistics::{linear_regression};
 use crate::config::cached_user_info::CachedUserInfo;
@@ -55,6 +55,7 @@ use crate::config::acl::{
         },
         history: {
             type: Array,
+            optional: true,
             description: "A list of usages of the past (last Month).",
             items: {
                 type: Number,
@@ -69,6 +70,11 @@ use crate::config::acl::{
                 of RRD data of the last Month. Missing if there are not enough data points yet.\
                 If the estimate lies in the past, the usage is decreasing.",
             },
+            "error": {
+                type: String,
+                optional: true,
+                description: "An error description, for example, when the datastore could not be looked up.",
+            },
         },
     },
 },
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store)?;
|
let datastore = match DataStore::lookup_datastore(&store) {
|
||||||
|
Ok(datastore) => datastore,
|
||||||
|
Err(err) => {
|
||||||
|
list.push(json!({
|
||||||
|
"store": store,
|
||||||
|
"total": -1,
|
||||||
|
"used": -1,
|
||||||
|
"avail": -1,
|
||||||
|
"error": err.to_string()
|
||||||
|
}));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
|
let status = crate::tools::disks::disk_usage(&datastore.base_path())?;
|
||||||
|
|
||||||
let mut entry = json!({
|
let mut entry = json!({
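A datastore that fails to look up no longer aborts the whole status listing with `?`; the error is folded into that datastore's entry and the loop continues. The same pattern in a self-contained form, using serde_json as the hunk does:

use serde_json::{json, Value};

// Hypothetical lookup standing in for DataStore::lookup_datastore.
fn lookup_datastore(store: &str) -> Result<u64, String> {
    match store {
        "store1" => Ok(42),
        other => Err(format!("no such datastore '{}'", other)),
    }
}

fn main() {
    let mut list: Vec<Value> = Vec::new();
    for store in ["store1", "missing"] {
        // report the failure in-band instead of returning early
        let entry = match lookup_datastore(store) {
            Ok(total) => json!({ "store": store, "total": total }),
            Err(err) => json!({ "store": store, "total": -1, "error": err }),
        };
        list.push(entry);
    }
    assert_eq!(list[1]["total"], -1);
}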
@@ -110,24 +128,17 @@ pub fn datastore_status(

         let rrd_dir = format!("datastore/{}", store);
         let now = proxmox::tools::time::epoch_f64();
-        let rrd_resolution = RRDTimeFrameResolution::Month;
-        let rrd_mode = RRDMode::Average;
-
-        let total_res = crate::rrd::extract_cached_data(
-            &rrd_dir,
-            "total",
-            now,
-            rrd_resolution,
-            rrd_mode,
-        );

-        let used_res = crate::rrd::extract_cached_data(
+        let get_rrd = |what: &str| crate::rrd::extract_cached_data(
             &rrd_dir,
-            "used",
+            what,
             now,
-            rrd_resolution,
-            rrd_mode,
+            RRDTimeFrameResolution::Month,
+            RRDMode::Average,
         );

+        let total_res = get_rrd("total");
+        let used_res = get_rrd("used");
+
         if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
             let mut usage_list: Vec<f64> = Vec::new();
@@ -160,13 +171,10 @@ pub fn datastore_status(

             // we skip the calculation for datastores with not enough data
             if usage_list.len() >= 7 {
-                entry["estimated-full-date"] = Value::from(0);
-                if let Some((a,b)) = linear_regression(&time_list, &usage_list) {
-                    if b != 0.0 {
-                        let estimate = (1.0 - a) / b;
-                        entry["estimated-full-date"] = Value::from(estimate.floor() as u64);
-                    }
-                }
+                entry["estimated-full-date"] = match linear_regression(&time_list, &usage_list) {
+                    Some((a, b)) if b != 0.0 => Value::from(((1.0 - a) / b).floor() as u64),
+                    _ => Value::from(0),
+                };
             }
         }
     }
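The estimated-full-date comes from fitting a line to the relative usage history: with usage approximated as a + b*t (t in epoch seconds, usage as a fraction of the total), the store hits 100% when a + b*t = 1.0, i.e. t = (1 - a) / b; the b != 0.0 guard avoids a division by zero for flat usage. A self-contained sketch with a tiny least-squares fit (this fit is an illustration, not the crate's own linear_regression):

// Ordinary least-squares fit of y = a + b*x; None for degenerate input.
fn linear_regression(x: &[f64], y: &[f64]) -> Option<(f64, f64)> {
    if x.len() != y.len() || x.len() < 2 {
        return None;
    }
    let n = x.len() as f64;
    let mx = x.iter().sum::<f64>() / n;
    let my = y.iter().sum::<f64>() / n;
    let sxx: f64 = x.iter().map(|v| (v - mx) * (v - mx)).sum();
    let sxy: f64 = x.iter().zip(y).map(|(u, v)| (u - mx) * (v - my)).sum();
    if sxx == 0.0 {
        return None;
    }
    let b = sxy / sxx;
    Some((my - b * mx, b))
}

fn main() {
    // usage grows 1% per day starting at 50%: full after ~50 more days
    let day = 86_400.0;
    let time: Vec<f64> = (0..7).map(|i| i as f64 * day).collect();
    let usage: Vec<f64> = (0..7).map(|i| 0.50 + 0.01 * i as f64).collect();

    let (a, b) = linear_regression(&time, &usage).unwrap();
    let full_at = (1.0 - a) / b; // seconds (relative here) until usage hits 100%
    assert!((full_at / day - 50.0).abs() < 1e-6);
}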
@@ -1,10 +1,11 @@
 use std::path::Path;
-use std::sync::Arc;
+use std::sync::{Mutex, Arc};

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;

 use proxmox::{
+    try_block,
     api::{
         api,
         RpcEnvironment,
@@ -33,6 +34,7 @@ use crate::{
     },
     server::{
         lookup_user_email,
+        TapeBackupJobSummary,
         jobstate::{
             Job,
             JobState,
@@ -63,6 +65,7 @@ use crate::{
         drive::{
             media_changer,
             lock_tape_device,
+            TapeLockError,
             set_tape_device_state,
         },
         changer::update_changer_online_status,
|
|||||||
|
|
||||||
let (drive_config, _digest) = config::drive::config()?;
|
let (drive_config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
// early check/lock before starting worker
|
// for scheduled jobs we acquire the lock later in the worker
|
||||||
let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
|
let drive_lock = if schedule.is_some() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(lock_tape_device(&drive_config, &setup.drive)?)
|
||||||
|
};
|
||||||
|
|
||||||
|
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
||||||
|
let email = lookup_user_email(notify_user);
|
||||||
|
|
||||||
let upid_str = WorkerTask::new_thread(
|
let upid_str = WorkerTask::new_thread(
|
||||||
&worker_type,
|
&worker_type,
|
||||||
@ -185,26 +195,44 @@ pub fn do_tape_backup_job(
|
|||||||
auth_id.clone(),
|
auth_id.clone(),
|
||||||
false,
|
false,
|
||||||
move |worker| {
|
move |worker| {
|
||||||
let _drive_lock = drive_lock; // keep lock guard
|
|
||||||
|
|
||||||
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
|
||||||
job.start(&worker.upid().to_string())?;
|
job.start(&worker.upid().to_string())?;
|
||||||
|
let mut drive_lock = drive_lock;
|
||||||
|
|
||||||
|
let mut summary = Default::default();
|
||||||
|
let job_result = try_block!({
|
||||||
|
if schedule.is_some() {
|
||||||
|
// for scheduled tape backup jobs, we wait indefinitely for the lock
|
||||||
|
task_log!(worker, "waiting for drive lock...");
|
||||||
|
loop {
|
||||||
|
worker.check_abort()?;
|
||||||
|
match lock_tape_device(&drive_config, &setup.drive) {
|
||||||
|
Ok(lock) => {
|
||||||
|
drive_lock = Some(lock);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Err(TapeLockError::TimeOut) => continue,
|
||||||
|
Err(TapeLockError::Other(err)) => return Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||||
|
|
||||||
task_log!(worker,"Starting tape backup job '{}'", job_id);
|
task_log!(worker,"Starting tape backup job '{}'", job_id);
|
||||||
if let Some(event_str) = schedule {
|
if let Some(event_str) = schedule {
|
||||||
task_log!(worker,"task triggered by schedule '{}'", event_str);
|
task_log!(worker,"task triggered by schedule '{}'", event_str);
|
||||||
}
|
}
|
||||||
|
|
||||||
let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
|
|
||||||
let email = lookup_user_email(notify_user);
|
|
||||||
|
|
||||||
let job_result = backup_worker(
|
backup_worker(
|
||||||
&worker,
|
&worker,
|
||||||
datastore,
|
datastore,
|
||||||
&pool_config,
|
&pool_config,
|
||||||
&setup,
|
&setup,
|
||||||
email.clone(),
|
email.clone(),
|
||||||
);
|
&mut summary,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
});
|
||||||
|
|
||||||
let status = worker.create_state(&job_result);
|
let status = worker.create_state(&job_result);
|
||||||
|
|
||||||
@ -214,6 +242,7 @@ pub fn do_tape_backup_job(
|
|||||||
Some(job.jobname()),
|
Some(job.jobname()),
|
||||||
&setup,
|
&setup,
|
||||||
&job_result,
|
&job_result,
|
||||||
|
summary,
|
||||||
) {
|
) {
|
||||||
eprintln!("send tape backup notification failed: {}", err);
|
eprintln!("send tape backup notification failed: {}", err);
|
||||||
}
|
}
|
||||||
@ -288,6 +317,12 @@ pub fn run_tape_backup_job(
|
|||||||
type: TapeBackupJobSetup,
|
type: TapeBackupJobSetup,
|
||||||
flatten: true,
|
flatten: true,
|
||||||
},
|
},
|
||||||
|
"force-media-set": {
|
||||||
|
description: "Ignore the allocation policy and start a new media-set.",
|
||||||
|
optional: true,
|
||||||
|
type: bool,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
@ -303,6 +338,7 @@ pub fn run_tape_backup_job(
|
|||||||
/// Backup datastore to tape media pool
|
/// Backup datastore to tape media pool
|
||||||
pub fn backup(
|
pub fn backup(
|
||||||
setup: TapeBackupJobSetup,
|
setup: TapeBackupJobSetup,
|
||||||
|
force_media_set: bool,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
@ -340,12 +376,16 @@ pub fn backup(
|
|||||||
move |worker| {
|
move |worker| {
|
||||||
let _drive_lock = drive_lock; // keep lock guard
|
let _drive_lock = drive_lock; // keep lock guard
|
||||||
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
|
||||||
|
|
||||||
|
let mut summary = Default::default();
|
||||||
let job_result = backup_worker(
|
let job_result = backup_worker(
|
||||||
&worker,
|
&worker,
|
||||||
datastore,
|
datastore,
|
||||||
&pool_config,
|
&pool_config,
|
||||||
&setup,
|
&setup,
|
||||||
email.clone(),
|
email.clone(),
|
||||||
|
&mut summary,
|
||||||
|
force_media_set,
|
||||||
);
|
);
|
||||||
|
|
||||||
if let Some(email) = email {
|
if let Some(email) = email {
|
||||||
@ -354,6 +394,7 @@ pub fn backup(
|
|||||||
None,
|
None,
|
||||||
&setup,
|
&setup,
|
||||||
&job_result,
|
&job_result,
|
||||||
|
summary,
|
||||||
) {
|
) {
|
||||||
eprintln!("send tape backup notification failed: {}", err);
|
eprintln!("send tape backup notification failed: {}", err);
|
||||||
}
|
}
|
||||||
@ -374,18 +415,25 @@ fn backup_worker(
|
|||||||
pool_config: &MediaPoolConfig,
|
pool_config: &MediaPoolConfig,
|
||||||
setup: &TapeBackupJobSetup,
|
setup: &TapeBackupJobSetup,
|
||||||
email: Option<String>,
|
email: Option<String>,
|
||||||
|
summary: &mut TapeBackupJobSummary,
|
||||||
|
force_media_set: bool,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
|
let start = std::time::Instant::now();
|
||||||
let _lock = MediaPool::lock(status_path, &pool_config.name)?;
|
|
||||||
|
|
||||||
task_log!(worker, "update media online status");
|
task_log!(worker, "update media online status");
|
||||||
let changer_name = update_media_online_status(&setup.drive)?;
|
let changer_name = update_media_online_status(&setup.drive)?;
|
||||||
|
|
||||||
let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
|
let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;
|
||||||
|
|
||||||
let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;
|
let mut pool_writer = PoolWriter::new(
|
||||||
|
pool,
|
||||||
|
&setup.drive,
|
||||||
|
worker,
|
||||||
|
email,
|
||||||
|
force_media_set
|
||||||
|
)?;
|
||||||
|
|
||||||
let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
|
let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
|
||||||
|
|
||||||
@@ -402,26 +450,42 @@ fn backup_worker(
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }

+    let datastore_name = datastore.name();
+
     let mut errors = false;

+    let mut need_catalog = false; // avoid writing catalog for empty jobs
+
     for (group_number, group) in group_list.into_iter().enumerate() {
         progress.done_groups = group_number as u64;
         progress.done_snapshots = 0;
         progress.group_snapshots = 0;

-        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
+        let snapshot_list = group.list_backups(&datastore.base_path())?;
+
+        // filter out unfinished backups
+        let mut snapshot_list = snapshot_list
+            .into_iter()
+            .filter(|item| item.is_finished())
+            .collect();

         BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

         if latest_only {
             progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
+
+                need_catalog = true;
+
+                let snapshot_name = info.backup_dir.to_string();
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
+                } else {
+                    summary.snapshot_list.push(snapshot_name);
                 }
                 progress.done_snapshots = 1;
                 task_log!(
@@ -433,12 +497,18 @@ fn backup_worker(
         } else {
             progress.group_snapshots = snapshot_list.len() as u64;
             for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
+
+                need_catalog = true;
+
+                let snapshot_name = info.backup_dir.to_string();
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
+                } else {
+                    summary.snapshot_list.push(snapshot_name);
                 }
                 progress.done_snapshots = snapshot_number as u64 + 1;
                 task_log!(
@@ -452,6 +522,22 @@ fn backup_worker(

     pool_writer.commit()?;

+    if need_catalog {
+        task_log!(worker, "append media catalog");
+
+        let uuid = pool_writer.load_writable_media(worker)?;
+        let done = pool_writer.append_catalog_archive(worker)?;
+        if !done {
+            task_log!(worker, "catalog does not fit on tape, writing to next volume");
+            pool_writer.set_media_status_full(&uuid)?;
+            pool_writer.load_writable_media(worker)?;
+            let done = pool_writer.append_catalog_archive(worker)?;
+            if !done {
+                bail!("write_catalog_archive failed on second media");
+            }
+        }
+    }
+
     if setup.export_media_set.unwrap_or(false) {
         pool_writer.export_media_set(worker)?;
     } else if setup.eject_media.unwrap_or(false) {
@@ -462,6 +548,8 @@ fn backup_worker(
         bail!("Tape backup finished with some errors. Please check the task log.");
     }

+    summary.duration = start.elapsed();
+
     Ok(())
 }

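The catalog append follows a try-once-then-switch-volumes pattern: if the archive does not fit on the current tape, the media is marked full, the next writable media is loaded, and the write is retried exactly once before giving up. Schematically (a sketch, with a counter standing in for remaining tape capacity):

// Stand-in for an append that reports whether the data fit on the volume.
fn append_catalog(space_left: &mut u32, needed: u32) -> bool {
    if *space_left >= needed {
        *space_left -= needed;
        true
    } else {
        false
    }
}

fn main() -> Result<(), String> {
    let mut current_volume = 1_u32; // almost full
    let mut next_volume = 100_u32;  // fresh tape

    if !append_catalog(&mut current_volume, 10) {
        // mark the current media full and load the next writable media...
        if !append_catalog(&mut next_volume, 10) {
            // ...and only fail if the second volume cannot take it either
            return Err("write_catalog_archive failed on second media".into());
        }
    }
    Ok(())
}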
@@ -508,33 +596,48 @@ pub fn backup_snapshot(
         }
     };

-    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
+    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
+
+    let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
+        datastore.clone(),
+        snapshot_reader.clone(),
+    )?;
+
+    let mut chunk_iter = chunk_iter.peekable();

     loop {
         worker.check_abort()?;

         // test is we have remaining chunks
-        if chunk_iter.peek().is_none() {
-            break;
+        match chunk_iter.peek() {
+            None => break,
+            Some(Ok(_)) => { /* Ok */ },
+            Some(Err(err)) => bail!("{}", err),
         }

         let uuid = pool_writer.load_writable_media(worker)?;

         worker.check_abort()?;

-        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
+        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

         if leom {
             pool_writer.set_media_status_full(&uuid)?;
         }
     }

+    if let Err(_) = reader_thread.join() {
+        bail!("chunk reader thread failed");
+    }
+
     worker.check_abort()?;

     let uuid = pool_writer.load_writable_media(worker)?;

     worker.check_abort()?;

+    let snapshot_reader = snapshot_reader.lock().unwrap();
+
     let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

     if !done {
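Chunk reading now happens on a dedicated thread that feeds the tape writer, and the writer consumes an iterator of Result items, so read errors surface at the consumer; the thread is joined at the end to catch failures. A self-contained sketch of that shape with std's mpsc (the channel plumbing is an assumption about what spawn_chunk_reader_thread does internally):

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // bounded channel: the reader stays only a few chunks ahead of the writer
    let (tx, rx) = sync_channel::<Result<Vec<u8>, String>>(3);

    let reader_thread = thread::spawn(move || {
        for i in 0..5u8 {
            // a real reader would send Err(..) on an I/O failure
            if tx.send(Ok(vec![i; 4])).is_err() {
                break; // writer hung up
            }
        }
    });

    let mut chunk_iter = rx.into_iter().peekable();
    let mut written = 0;
    loop {
        match chunk_iter.peek() {
            None => break,                       // reader done, channel closed
            Some(Ok(_)) => { /* Ok */ }
            Some(Err(err)) => panic!("{}", err), // the real code bails out here
        }
        let chunk = chunk_iter.next().unwrap().unwrap();
        written += chunk.len();
    }

    reader_thread.join().expect("chunk reader thread failed");
    assert_eq!(written, 20);
}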
@@ -20,7 +20,7 @@ use crate::{
     Authid,
     CHANGER_NAME_SCHEMA,
     ChangerListEntry,
-    LinuxTapeDrive,
+    LtoTapeDrive,
     MtxEntryKind,
     MtxStatusEntry,
     ScsiTapeChanger,
@@ -88,7 +88,7 @@ pub async fn get_status(

     inventory.update_online_status(&map)?;

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
     let mut drive_map: HashMap<u64, String> = HashMap::new();

     for drive in drive_list {
@@ -1,6 +1,7 @@
 use std::panic::UnwindSafe;
 use std::path::Path;
 use std::sync::Arc;
+use std::collections::HashMap;

 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
@@ -10,7 +11,6 @@ use proxmox::{
     identity,
     list_subdirs_api_method,
     tools::Uuid,
-    sys::error::SysError,
     api::{
         api,
         section_config::SectionConfigData,
@@ -42,22 +42,29 @@ use crate::{
         MEDIA_POOL_NAME_SCHEMA,
         Authid,
         DriveListEntry,
-        LinuxTapeDrive,
+        LtoTapeDrive,
         MediaIdFlat,
         LabelUuidMap,
         MamAttribute,
-        LinuxDriveAndMediaStatus,
+        LtoDriveAndMediaStatus,
+        Lp17VolumeStatistics,
+    },
+    tape::restore::{
+        fast_catalog_restore,
+        restore_media,
     },
-    tape::restore::restore_media,
 },
 server::WorkerTask,
 tape::{
     TAPE_STATUS_DIR,
-    MediaPool,
     Inventory,
     MediaCatalog,
     MediaId,
-    linux_tape_device_list,
+    BlockReadError,
+    lock_media_set,
+    lock_media_pool,
+    lock_unassigned_media_pool,
+    lto_tape_device_list,
     lookup_device_identification,
     file_formats::{
         MediaLabel,
@@ -65,9 +72,8 @@ use crate::{
     },
     drive::{
         TapeDriver,
-        LinuxTapeHandle,
-        Lp17VolumeStatistics,
-        open_linux_tape_device,
+        LtoTapeHandle,
+        open_lto_tape_device,
         media_changer,
         required_media_changer,
         open_drive,
@@ -316,8 +322,8 @@ pub fn unload(
         permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_WRITE, false),
     },
 )]
-/// Erase media. Check for label-text if given (cancels if wrong media).
-pub fn erase_media(
+/// Format media. Check for label-text if given (cancels if wrong media).
+pub fn format_media(
     drive: String,
     fast: Option<bool>,
     label_text: Option<String>,
@@ -326,7 +332,7 @@ pub fn erase_media(
     let upid_str = run_drive_worker(
         rpcenv,
         drive.clone(),
-        "erase-media",
+        "format-media",
         Some(drive.clone()),
         move |worker, config| {
             if let Some(ref label) = label_text {
@@ -345,15 +351,15 @@ pub fn erase_media(
                     }
                     /* assume drive contains no or unrelated data */
                     task_log!(worker, "unable to read media label: {}", err);
-                    task_log!(worker, "erase anyways");
-                    handle.erase_media(fast.unwrap_or(true))?;
+                    task_log!(worker, "format anyways");
+                    handle.format_media(fast.unwrap_or(true))?;
                 }
                 Ok((None, _)) => {
                     if let Some(label) = label_text {
                         bail!("expected label '{}', found empty tape", label);
                     }
-                    task_log!(worker, "found empty media - erase anyways");
-                    handle.erase_media(fast.unwrap_or(true))?;
+                    task_log!(worker, "found empty media - format anyways");
+                    handle.format_media(fast.unwrap_or(true))?;
                 }
                 Ok((Some(media_id), _key_config)) => {
                     if let Some(label_text) = label_text {
|
|||||||
);
|
);
|
||||||
|
|
||||||
let status_path = Path::new(TAPE_STATUS_DIR);
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
let mut inventory = Inventory::load(status_path)?;
|
let mut inventory = Inventory::new(status_path);
|
||||||
|
|
||||||
|
if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
|
||||||
|
let _pool_lock = lock_media_pool(status_path, pool)?;
|
||||||
|
let _media_set_lock = lock_media_set(status_path, uuid, None)?;
|
||||||
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
|
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
|
||||||
inventory.remove_media(&media_id.label.uuid)?;
|
inventory.remove_media(&media_id.label.uuid)?;
|
||||||
handle.erase_media(fast.unwrap_or(true))?;
|
} else {
|
||||||
|
let _lock = lock_unassigned_media_pool(status_path)?;
|
||||||
|
MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
|
||||||
|
inventory.remove_media(&media_id.label.uuid)?;
|
||||||
|
};
|
||||||
|
|
||||||
|
handle.format_media(fast.unwrap_or(true))?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -489,7 +504,7 @@ pub fn eject_media(
 /// Write a new media label to the media in 'drive'. The media is
 /// assigned to the specified 'pool', or else to the free media pool.
 ///
-/// Note: The media need to be empty (you may want to erase it first).
+/// Note: The media need to be empty (you may want to format it first).
 pub fn label_media(
     drive: String,
     pool: Option<String>,
@@ -514,16 +529,13 @@ pub fn label_media(
             drive.rewind()?;

             match drive.read_next_file() {
-                Ok(Some(_file)) => bail!("media is not empty (erase first)"),
-                Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
+                Ok(_reader) => bail!("media is not empty (format it first)"),
+                Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
+                Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
                 Err(err) => {
-                    if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
-                        /* assume tape is empty */
-                    } else {
-                        bail!("media read error - {}", err);
-                    }
+                    bail!("media read error - {}", err);
                 }
             }
-            }

             let ctime = proxmox::tools::time::epoch_i64();
             let label = MediaLabel {
@@ -548,29 +560,38 @@ fn write_media_label(

     drive.label_tape(&label)?;

-    let mut media_set_label = None;
+    let status_path = Path::new(TAPE_STATUS_DIR);

-    if let Some(ref pool) = pool {
+    let media_id = if let Some(ref pool) = pool {
         // assign media to pool by writing special media set label
         worker.log(format!("Label media '{}' for pool '{}'", label.label_text, pool));
         let set = MediaSetLabel::with_data(&pool, [0u8; 16].into(), 0, label.ctime, None);

         drive.write_media_set_label(&set, None)?;
-        media_set_label = Some(set);
-    } else {
-        worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
-    }

-    let media_id = MediaId { label, media_set_label };
-
-    let status_path = Path::new(TAPE_STATUS_DIR);
+        let media_id = MediaId { label, media_set_label: Some(set) };

         // Create the media catalog
         MediaCatalog::overwrite(status_path, &media_id, false)?;

-    let mut inventory = Inventory::load(status_path)?;
-    inventory.store(media_id.clone(), false)?;
+        let mut inventory = Inventory::new(status_path);
+        inventory.store(media_id.clone(), false)?;
+
+        media_id
+    } else {
+        worker.log(format!("Label media '{}' (no pool assignment)", label.label_text));
+
+        let media_id = MediaId { label, media_set_label: None };
+
+        // Create the media catalog
+        MediaCatalog::overwrite(status_path, &media_id, false)?;
+
+        let mut inventory = Inventory::new(status_path);
+        inventory.store(media_id.clone(), false)?;
+
+        media_id
+    };

     drive.rewind()?;

     match drive.read_label() {
@@ -705,14 +726,24 @@ pub async fn read_label(

         if let Err(err) = drive.set_encryption(encrypt_fingerprint) {
             // try, but ignore errors. just log to stderr
-            eprintln!("uable to load encryption key: {}", err);
+            eprintln!("unable to load encryption key: {}", err);
         }
     }

     if let Some(true) = inventorize {
         let state_path = Path::new(TAPE_STATUS_DIR);
-        let mut inventory = Inventory::load(state_path)?;
-        inventory.store(media_id, false)?;
+        let mut inventory = Inventory::new(state_path);
+
+        if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
+            let _pool_lock = lock_media_pool(state_path, pool)?;
+            let _lock = lock_media_set(state_path, uuid, None)?;
+            MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
+            inventory.store(media_id, false)?;
+        } else {
+            let _lock = lock_unassigned_media_pool(state_path)?;
+            MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
+            inventory.store(media_id, false)?;
+        };
     }

     flat
@@ -760,9 +791,9 @@ pub fn clean_drive(

     changer.clean_drive()?;

-    if let Ok(drive_config) = config.lookup::<LinuxTapeDrive>("linux", &drive) {
+    if let Ok(drive_config) = config.lookup::<LtoTapeDrive>("lto", &drive) {
         // Note: clean_drive unloads the cleaning media, so we cannot use drive_config.open
-        let mut handle = LinuxTapeHandle::new(open_linux_tape_device(&drive_config.path)?);
+        let mut handle = LtoTapeHandle::new(open_lto_tape_device(&drive_config.path)?)?;

         // test for critical tape alert flags
         if let Ok(alert_flags) = handle.tape_alert_flags() {
@@ -947,7 +978,17 @@ pub fn update_inventory(
                 continue;
             }
             worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
-            inventory.store(media_id, false)?;
+
+            if let Some(MediaSetLabel { ref pool, ref uuid, ..}) = media_id.media_set_label {
+                let _pool_lock = lock_media_pool(state_path, pool)?;
+                let _lock = lock_media_set(state_path, uuid, None)?;
+                MediaCatalog::destroy_unrelated_catalog(state_path, &media_id)?;
+                inventory.store(media_id, false)?;
+            } else {
+                let _lock = lock_unassigned_media_pool(state_path)?;
+                MediaCatalog::destroy(state_path, &media_id.label.uuid)?;
+                inventory.store(media_id, false)?;
+            };
         }
     }
     changer.unload_media(None)?;
@@ -1047,20 +1088,17 @@ fn barcode_label_media_worker(
         drive.rewind()?;

         match drive.read_next_file() {
-            Ok(Some(_file)) => {
-                worker.log(format!("media '{}' is not empty (erase first)", label_text));
+            Ok(_reader) => {
+                worker.log(format!("media '{}' is not empty (format it first)", label_text));
                 continue;
             }
-            Ok(None) => { /* EOF mark at BOT, assume tape is empty */ },
-            Err(err) => {
-                if err.is_errno(nix::errno::Errno::ENOSPC) || err.is_errno(nix::errno::Errno::EIO) {
-                    /* assume tape is empty */
-                } else {
-                    worker.warn(format!("media '{}' read error (maybe not empty - erase first)", label_text));
-                    continue;
-                }
-            }
-        }
+            Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ },
+            Err(BlockReadError::EndOfStream) => { /* tape is empty */ },
+            Err(_err) => {
+                worker.warn(format!("media '{}' read error (maybe not empty - format it first)", label_text));
+                continue;
+            }
+        }

         let ctime = proxmox::tools::time::epoch_i64();
         let label = MediaLabel {
@@ -1100,7 +1138,7 @@ pub async fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error>
         drive.clone(),
         "reading cartridge memory".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
             let mut handle = drive_config.open()?;

             handle.cartridge_memory()
@@ -1130,7 +1168,7 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
         drive.clone(),
         "reading volume statistics".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
             let mut handle = drive_config.open()?;

             handle.volume_statistics()
@@ -1148,24 +1186,24 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
         },
     },
     returns: {
-        type: LinuxDriveAndMediaStatus,
+        type: LtoDriveAndMediaStatus,
     },
     access: {
         permission: &Permission::Privilege(&["tape", "device", "{drive}"], PRIV_TAPE_AUDIT, false),
     },
 )]
 /// Get drive/media status
-pub async fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
+pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
     run_drive_blocking_task(
         drive.clone(),
         "reading drive status".to_string(),
         move |config| {
-            let drive_config: LinuxTapeDrive = config.lookup("linux", &drive)?;
+            let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;

-            // Note: use open_linux_tape_device, because this also works if no medium loaded
-            let file = open_linux_tape_device(&drive_config.path)?;
+            // Note: use open_lto_tape_device, because this also works if no medium loaded
+            let file = open_lto_tape_device(&drive_config.path)?;

-            let mut handle = LinuxTapeHandle::new(file);
+            let mut handle = LtoTapeHandle::new(file)?;

             handle.get_drive_and_media_status()
         }
@@ -1184,6 +1222,11 @@ pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
             type: bool,
             optional: true,
         },
+        scan: {
+            description: "Re-read the whole tape to reconstruct the catalog instead of restoring saved versions.",
+            type: bool,
+            optional: true,
+        },
         verbose: {
             description: "Verbose mode - log all found chunks.",
             type: bool,
@@ -1202,11 +1245,13 @@ pub async fn status(drive: String) -> Result<LtoDriveAndMediaStatus, Error> {
 pub fn catalog_media(
     drive: String,
     force: Option<bool>,
+    scan: Option<bool>,
     verbose: Option<bool>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let verbose = verbose.unwrap_or(false);
     let force = force.unwrap_or(false);
+    let scan = scan.unwrap_or(false);

     let upid_str = run_drive_worker(
         rpcenv,
@@ -1237,19 +1282,22 @@ pub fn catalog_media(

             let status_path = Path::new(TAPE_STATUS_DIR);

-            let mut inventory = Inventory::load(status_path)?;
-            inventory.store(media_id.clone(), false)?;
+            let mut inventory = Inventory::new(status_path);

-            let pool = match media_id.media_set_label {
+            let (_media_set_lock, media_set_uuid) = match media_id.media_set_label {
                 None => {
                     worker.log("media is empty");
+                    let _lock = lock_unassigned_media_pool(status_path)?;
                     MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                    inventory.store(media_id.clone(), false)?;
                     return Ok(());
                 }
                 Some(ref set) => {
                     if set.uuid.as_ref() == [0u8;16] { // media is empty
                         worker.log("media is empty");
+                        let _lock = lock_unassigned_media_pool(status_path)?;
                         MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
+                        inventory.store(media_id.clone(), false)?;
                         return Ok(());
                     }
                     let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
@@ -1257,17 +1305,38 @@ pub fn catalog_media(

                     drive.set_encryption(encrypt_fingerprint)?;

-                    set.pool.clone()
+                    let _pool_lock = lock_media_pool(status_path, &set.pool)?;
+                    let media_set_lock = lock_media_set(status_path, &set.uuid, None)?;
+
+                    MediaCatalog::destroy_unrelated_catalog(status_path, &media_id)?;
+
+                    inventory.store(media_id.clone(), false)?;
+
+                    (media_set_lock, &set.uuid)
                 }
             };

-            let _lock = MediaPool::lock(status_path, &pool)?;
-
             if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
                 bail!("media catalog exists (please use --force to overwrite)");
             }

-            restore_media(&worker, &mut drive, &media_id, None, verbose)?;
+            if !scan {
+                let media_set = inventory.compute_media_set_members(media_set_uuid)?;
+
+                if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? {
+                    return Ok(())
+                }
+
+                task_log!(worker, "no catalog found");
+            }
+
+            task_log!(worker, "scanning entire media to reconstruct catalog");
+
+            drive.rewind()?;
+            drive.read_label()?; // skip over labels - we already read them above
+
+            let mut checked_chunks = HashMap::new();
+            restore_media(worker, &mut drive, &media_id, None, &mut checked_chunks, verbose)?;

             Ok(())
         },
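Cataloging now tries the cheap path first: restore a catalog copy stored on the tape itself, and only fall back to scanning the entire media when none is found or when the caller explicitly requests a scan. The control flow in isolation (a sketch; the two helpers are placeholders for fast_catalog_restore and restore_media):

fn fast_catalog_restore(available: bool) -> Result<bool, String> {
    Ok(available) // pretend we searched the media set for an on-tape catalog copy
}

fn rebuild_catalog(force_scan: bool, catalog_on_tape: bool) -> Result<&'static str, String> {
    if !force_scan {
        if fast_catalog_restore(catalog_on_tape)? {
            return Ok("restored saved catalog"); // fast path, no full read
        }
        // "no catalog found" - fall through to the slow path
    }
    // slow path: re-read the whole tape
    Ok("reconstructed catalog from full media scan")
}

fn main() {
    assert_eq!(rebuild_catalog(false, true).unwrap(), "restored saved catalog");
    assert_eq!(rebuild_catalog(true, true).unwrap(), "reconstructed catalog from full media scan");
    assert_eq!(rebuild_catalog(false, false).unwrap(), "reconstructed catalog from full media scan");
}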
@@ -1308,9 +1377,9 @@ pub fn list_drives(

     let (config, _) = config::drive::config()?;

-    let linux_drives = linux_tape_device_list();
+    let lto_drives = lto_tape_device_list();

-    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
+    let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;

     let mut list = Vec::new();

@@ -1324,7 +1393,7 @@ pub fn list_drives(
         continue;
     }

-    let info = lookup_device_identification(&linux_drives, &drive.path);
+    let info = lookup_device_identification(&lto_drives, &drive.path);
     let state = get_tape_device_state(&config, &drive.name)?;
     let entry = DriveListEntry { config: drive, info, state };
     list.push(entry);
@ -1356,9 +1425,9 @@ pub const SUBDIRS: SubdirMap = &sorted!([
|
|||||||
.post(&API_METHOD_EJECT_MEDIA)
|
.post(&API_METHOD_EJECT_MEDIA)
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"erase-media",
|
"format-media",
|
||||||
&Router::new()
|
&Router::new()
|
||||||
.post(&API_METHOD_ERASE_MEDIA)
|
.post(&API_METHOD_FORMAT_MEDIA)
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"export-media",
|
"export-media",
|
||||||
@ -1384,7 +1453,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
|
|||||||
(
|
(
|
||||||
"load-slot",
|
"load-slot",
|
||||||
&Router::new()
|
&Router::new()
|
||||||
.put(&API_METHOD_LOAD_SLOT)
|
.post(&API_METHOD_LOAD_SLOT)
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"cartridge-memory",
|
"cartridge-memory",
|
||||||
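
The reworked catalog flow above is two-phase: unless a full scan was requested, it first tries fast_catalog_restore, which reads the catalog archive stored on the tape itself, and only falls back to scanning every chunk archive when no catalog is found. A minimal stand-alone sketch of that fallback logic; the helper bodies here are stubs, not the real implementations:

    // Illustrative sketch only: mirrors the fallback logic from the diff above.
    // `fast_catalog_restore` / `full_media_scan` are stand-ins for the real
    // helpers, which take a worker, a drive handle and the media id.
    fn rebuild_catalog(scan: bool) -> Result<(), String> {
        if !scan {
            // cheap path: read the catalog archive written at backup time
            if fast_catalog_restore()? {
                return Ok(()); // catalog recovered without reading the data
            }
            println!("no catalog found");
        }
        println!("scanning entire media to reconstruct catalog");
        full_media_scan() // expensive: reads every chunk archive on the tape
    }

    fn fast_catalog_restore() -> Result<bool, String> { Ok(false) } // stub
    fn full_media_scan() -> Result<(), String> { Ok(()) } // stub

    fn main() {
        rebuild_catalog(false).unwrap();
    }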
@@ -1,4 +1,5 @@
 use std::path::Path;
+use std::collections::HashSet;

 use anyhow::{bail, format_err, Error};
 use serde::{Serialize, Deserialize};
@@ -28,6 +29,7 @@ use crate::{
         CHANGER_NAME_SCHEMA,
         MediaPoolConfig,
         MediaListEntry,
+        MediaSetListEntry,
         MediaStatus,
         MediaContentEntry,
         VAULT_NAME_SCHEMA,
@@ -44,6 +46,74 @@ use crate::{
     },
 };

+#[api(
+    returns: {
+        description: "List of media sets.",
+        type: Array,
+        items: {
+            type: MediaSetListEntry,
+        },
+    },
+    access: {
+        description: "List of media sets filtered by Tape.Audit privileges on pool",
+        permission: &Permission::Anybody,
+    },
+)]
+/// List Media sets
+pub async fn list_media_sets(
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<MediaSetListEntry>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let (config, _digest) = config::media_pool::config()?;
+
+    let status_path = Path::new(TAPE_STATUS_DIR);
+
+    let mut media_sets: HashSet<Uuid> = HashSet::new();
+    let mut list = Vec::new();
+
+    for (_section_type, data) in config.sections.values() {
+        let pool_name = match data["name"].as_str() {
+            None => continue,
+            Some(name) => name,
+        };
+
+        let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", pool_name]);
+        if (privs & PRIV_TAPE_AUDIT) == 0 {
+            continue;
+        }
+
+        let config: MediaPoolConfig = config.lookup("pool", pool_name)?;
+
+        let changer_name = None; // assume standalone drive
+        let pool = MediaPool::with_config(status_path, &config, changer_name, true)?;
+
+        for media in pool.list_media() {
+            if let Some(label) = media.media_set_label() {
+                if media_sets.contains(&label.uuid) {
+                    continue;
+                }
+
+                let media_set_uuid = label.uuid.clone();
+                let media_set_ctime = label.ctime;
+                let media_set_name = pool
+                    .generate_media_set_name(&media_set_uuid, config.template.clone())
+                    .unwrap_or_else(|_| media_set_uuid.to_string());
+
+                media_sets.insert(media_set_uuid.clone());
+                list.push(MediaSetListEntry {
+                    media_set_name,
+                    media_set_uuid,
+                    media_set_ctime,
+                    pool: pool_name.to_string(),
+                });
+            }
+        }
+    }
+
+    Ok(list)
+}
+
 #[api(
     input: {
         properties: {
@@ -122,14 +192,14 @@ pub async fn list_media(
         let config: MediaPoolConfig = config.lookup("pool", pool_name)?;

         let changer_name = None; // assume standalone drive
-        let mut pool = MediaPool::with_config(status_path, &config, changer_name)?;
+        let mut pool = MediaPool::with_config(status_path, &config, changer_name, true)?;

         let current_time = proxmox::tools::time::epoch_i64();

         // Call start_write_session, so that we show the same status a
         // backup job would see.
         pool.force_media_availability();
-        pool.start_write_session(current_time)?;
+        pool.start_write_session(current_time, false)?;

         for media in pool.list_media() {
             let expired = pool.media_is_expired(&media, current_time);
@@ -432,9 +502,10 @@ pub fn list_content(
             .generate_media_set_name(&set.uuid, template)
             .unwrap_or_else(|_| set.uuid.to_string());

-        let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
+        let catalog = MediaCatalog::open(status_path, &media_id, false, false)?;

-        for snapshot in catalog.snapshot_index().keys() {
+        for (store, content) in catalog.content() {
+            for snapshot in content.snapshot_index.keys() {
                 let backup_dir: BackupDir = snapshot.parse()?;

                 if let Some(ref backup_type) = filter.backup_type {
@@ -453,10 +524,12 @@ pub fn list_content(
                     media_set_ctime: set.ctime,
                     seq_nr: set.seq_nr,
                     snapshot: snapshot.to_owned(),
+                    store: store.to_owned(),
                     backup_time: backup_dir.backup_time(),
                 });
             }
         }
+    }

     Ok(list)
 }
@@ -543,6 +616,11 @@ const SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_DESTROY_MEDIA)
     ),
     ( "list", &MEDIA_LIST_ROUTER ),
+    (
+        "media-sets",
+        &Router::new()
+            .get(&API_METHOD_LIST_MEDIA_SETS)
+    ),
     (
         "move",
         &Router::new()
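
Since one media set usually spans several tapes, the new list_media_sets handler above dedups on the media-set UUID so each set appears once no matter how many tapes carry its label. A simplified, self-contained sketch of that dedup step, with tuples standing in for the real label types:

    use std::collections::HashSet;

    // Illustrative sketch: emit one entry per media-set UUID, first tape wins.
    fn dedup_media_sets(labels: &[(String, i64)]) -> Vec<(String, i64)> {
        let mut seen: HashSet<&str> = HashSet::new();
        let mut list = Vec::new();
        for (uuid, ctime) in labels {
            if seen.insert(uuid.as_str()) {
                // first tape of this media set: emit one list entry
                list.push((uuid.clone(), *ctime));
            }
        }
        list
    }

    fn main() {
        let labels = vec![
            ("a".to_string(), 1),
            ("a".to_string(), 1), // second tape of set "a", skipped
            ("b".to_string(), 2),
        ];
        assert_eq!(dedup_media_sets(&labels).len(), 2);
    }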
@@ -15,7 +15,7 @@ use proxmox::{
 use crate::{
     api2::types::TapeDeviceInfo,
     tape::{
-        linux_tape_device_list,
+        lto_tape_device_list,
         linux_tape_changer_list,
     },
 };
@@ -41,7 +41,7 @@ pub mod restore;
 /// Scan tape drives
 pub fn scan_drives(_param: Value) -> Result<Vec<TapeDeviceInfo>, Error> {

-    let list = linux_tape_device_list();
+    let list = lto_tape_device_list();

     Ok(list)
 }
src/api2/types/acme.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use proxmox::api::{api, schema::{Schema, StringSchema, ApiStringFormat}};
+
+use crate::api2::types::{
+    DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT,
+};
+
+#[api(
+    properties: {
+        "domain": { format: &DNS_NAME_FORMAT },
+        "alias": {
+            optional: true,
+            format: &DNS_ALIAS_FORMAT,
+        },
+        "plugin": {
+            optional: true,
+            format: &PROXMOX_SAFE_ID_FORMAT,
+        },
+    },
+    default_key: "domain",
+)]
+#[derive(Deserialize, Serialize)]
+/// A domain entry for an ACME certificate.
+pub struct AcmeDomain {
+    /// The domain to certify for.
+    pub domain: String,
+
+    /// The domain to use for challenges instead of the default acme challenge domain.
+    ///
+    /// This is useful if you use CNAME entries to redirect `_acme-challenge.*` domains to a
+    /// different DNS server.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub alias: Option<String>,
+
+    /// The plugin to use to validate this domain.
+    ///
+    /// Empty means standalone HTTP validation is used.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub plugin: Option<String>,
+}
+
+pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema = StringSchema::new(
+    "ACME domain configuration string")
+    .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
+    .schema();
+
+#[api(
+    properties: {
+        name: { type: String },
+        url: { type: String },
+    },
+)]
+/// An ACME directory endpoint with a name and URL.
+#[derive(Serialize)]
+pub struct KnownAcmeDirectory {
+    /// The ACME directory's name.
+    pub name: &'static str,
+
+    /// The ACME directory's endpoint URL.
+    pub url: &'static str,
+}
+
+proxmox::api_string_type! {
+    #[api(format: &PROXMOX_SAFE_ID_FORMAT)]
+    /// ACME account name.
+    #[derive(Clone, Eq, PartialEq, Hash, Deserialize, Serialize)]
+    #[serde(transparent)]
+    pub struct AcmeAccountName(String);
+}
+
+#[api(
+    properties: {
+        schema: {
+            type: Object,
+            additional_properties: true,
+            properties: {},
+        },
+        type: {
+            type: String,
+        },
+    },
+)]
+#[derive(Serialize)]
+/// Schema for an ACME challenge plugin.
+pub struct AcmeChallengeSchema {
+    /// Plugin ID.
+    pub id: String,
+
+    /// Human readable name, falls back to id.
+    pub name: String,
+
+    /// Plugin Type.
+    #[serde(rename = "type")]
+    pub ty: &'static str,
+
+    /// The plugin's parameter schema.
+    pub schema: Value,
+}
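
AcmeDomain above is exposed as an API property string with default_key "domain", so a bare value is treated as the domain, and the optional fields are dropped from serialized output via skip_serializing_if. A small illustrative round-trip using a trimmed stand-in struct; it assumes the serde and serde_json crates, and the domain values are made up:

    use serde::{Deserialize, Serialize};

    // Trimmed stand-in for AcmeDomain above, to show the JSON shape only;
    // the real type also carries the property-string schema.
    #[derive(Serialize, Deserialize, Debug)]
    struct AcmeDomain {
        domain: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        alias: Option<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        plugin: Option<String>,
    }

    fn main() {
        let d = AcmeDomain {
            domain: "example.org".into(), // hypothetical domain
            alias: Some("acme.example.net".into()),
            plugin: None,
        };
        // `plugin` is omitted entirely thanks to skip_serializing_if
        println!("{}", serde_json::to_string(&d).unwrap());
    }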
src/api2/types/file_restore.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
+use serde::{Deserialize, Serialize};
+use proxmox::api::api;
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// General status information about a running VM file-restore daemon
+pub struct RestoreDaemonStatus {
+    /// VM uptime in seconds
+    pub uptime: i64,
+    /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
+    /// not set, as then the status call will have reset the timer before returning the value
+    pub timeout: i64,
+}
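
With rename_all = "kebab-case" and the single-word field names above, the daemon status serializes to plain uptime/timeout keys. An illustrative parse of such a reply; the JSON payload is invented, not captured from a real daemon:

    use serde::Deserialize;

    // Stand-in mirroring the RestoreDaemonStatus shape from the diff above.
    #[derive(Deserialize, Debug)]
    struct RestoreDaemonStatus {
        uptime: i64,
        timeout: i64,
    }

    fn main() {
        let reply = r#"{ "uptime": 42, "timeout": 600 }"#;
        let status: RestoreDaemonStatus = serde_json::from_str(reply).unwrap();
        // a zero or negative timeout would mean the daemon is about to shut down
        println!("up {}s, {}s left", status.uptime, status.timeout);
    }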
@@ -11,7 +11,6 @@ use crate::{
     backup::{
         CryptMode,
         Fingerprint,
-        BACKUP_ID_REGEX,
         DirEntryAttribute,
         CatalogEntryType,
     },
@@ -34,6 +33,12 @@ pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GRO
 mod tape;
 pub use tape::*;

+mod file_restore;
+pub use file_restore::*;
+
+mod acme;
+pub use acme::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -45,9 +50,25 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });

+macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
+macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
+macro_rules! BACKUP_TIME_RE {
+    () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z")
+}
+macro_rules! SNAPSHOT_PATH_REGEX_STR {
+    () => (
+        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+    );
+}
+
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }

+macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
+macro_rules! DNS_ALIAS_NAME {
+    () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
+}
+
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }

@@ -84,6 +105,8 @@ const_regex!{

     pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");

+    pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
+
     pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");

     pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
@@ -99,6 +122,22 @@ const_regex!{
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";

     pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";

+    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+
+    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+
+    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+
+    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+
+    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+
+    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
+
+    pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
+
+    pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -137,6 +176,9 @@ pub const HOSTNAME_FORMAT: ApiStringFormat =
 pub const DNS_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_REGEX);

+pub const DNS_ALIAS_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
+
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

@@ -164,6 +206,12 @@ pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
 pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);

+pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
+
+pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)
@@ -356,6 +404,31 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();

+pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
+    .format(&DATASTORE_MAP_FORMAT)
+    .min_length(3)
+    .max_length(65)
+    .type_text("(<source>=)?<target>")
+    .schema();
+
+pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
+    .schema();
+
+pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of Datastore mappings (or single datastore), comma separated. \
+    For example 'a=b,e' maps the source datastore 'a' to target 'b and \
+    all other sources to the default 'e'. If no default is given, only the \
+    specified sources are mapped.")
+    .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
+    .schema();
+
+pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
+    "A snapshot in the format: 'store:type/id/time")
+    .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
+    .type_text("store:type/id/time")
+    .schema();
+
 pub const MEDIA_SET_UUID_SCHEMA: Schema =
     StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
     .format(&UUID_FORMAT)
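
The DATASTORE_MAP_LIST_SCHEMA description above defines the mapping rule used for tape restore: 'a=b,e' maps source datastore 'a' to target 'b' and every other source to the default 'e'; with no bare default, unmapped sources are simply skipped. A hypothetical sketch of that lookup rule, not the real parser:

    use std::collections::HashMap;

    // Illustrative only: resolve a source datastore against a mapping list
    // shaped like the schema's "a=b,e" example.
    fn map_target<'a>(mappings: &'a str, source: &str) -> Option<&'a str> {
        let mut default = None;
        let mut map = HashMap::new();
        for entry in mappings.split(',') {
            match entry.split_once('=') {
                Some((from, to)) => { map.insert(from, to); }
                None => { default = Some(entry); } // bare name = default target
            }
        }
        map.get(source).copied().or(default)
    }

    fn main() {
        assert_eq!(map_target("a=b,e", "a"), Some("b"));
        assert_eq!(map_target("a=b,e", "x"), Some("e")); // falls back to default
        assert_eq!(map_target("a=b", "x"), None); // no default: source skipped
    }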
@@ -441,6 +514,12 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
     .max_length(64)
     .schema();

+pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(2)
+    .max_length(32)
+    .schema();
+
 // Complex type definitions

 #[api(
@@ -724,9 +803,8 @@ impl Default for GarbageCollectionStatus {
     }
 }

-
 #[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.
 pub struct StorageStatus {
     /// Total space (bytes).
@@ -1327,20 +1405,32 @@ pub struct ArchiveEntry {
 }

 impl ArchiveEntry {
-    pub fn new(filepath: &[u8], entry_type: &DirEntryAttribute) -> Self {
+    pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
+        let size = match entry_type {
+            Some(DirEntryAttribute::File { size, .. }) => Some(*size),
+            _ => None,
+        };
+        Self::new_with_size(filepath, entry_type, size)
+    }
+
+    pub fn new_with_size(
+        filepath: &[u8],
+        entry_type: Option<&DirEntryAttribute>,
+        size: Option<u64>,
+    ) -> Self {
         Self {
             filepath: base64::encode(filepath),
             text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
                 .to_string(),
-            entry_type: CatalogEntryType::from(entry_type).to_string(),
-            leaf: !matches!(entry_type, DirEntryAttribute::Directory { .. }),
-            size: match entry_type {
-                DirEntryAttribute::File { size, .. } => Some(*size),
-                _ => None
+            entry_type: match entry_type {
+                Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
+                None => "v".to_owned(),
             },
+            leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
+            size,
             mtime: match entry_type {
-                DirEntryAttribute::File { mtime, .. } => Some(*mtime),
-                _ => None
+                Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
+                _ => None,
             },
         }
     }
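
ArchiveEntry::new above now takes an Option<&DirEntryAttribute>: None stands for a virtual entry, reported with entry type "v" and treated as non-leaf, while new_with_size lets callers supply a size the catalog itself does not know. A trimmed sketch of just the type-code mapping; the enum here is a stand-in, not the real catalog type:

    // Illustrative sketch of the Option-based entry-type logic from the diff.
    #[derive(Debug)]
    enum DirEntryAttribute {
        File { size: u64, mtime: i64 },
        Directory,
    }

    fn entry_type_code(entry_type: Option<&DirEntryAttribute>) -> String {
        match entry_type {
            Some(DirEntryAttribute::File { .. }) => "f".to_owned(),
            Some(DirEntryAttribute::Directory) => "d".to_owned(),
            // `None` marks a virtual entry (e.g. a synthetic root), shown as "v"
            None => "v".to_owned(),
        }
    }

    fn main() {
        let file = DirEntryAttribute::File { size: 123, mtime: 0 };
        assert_eq!(entry_type_code(Some(&file)), "f");
        assert_eq!(entry_type_code(None), "v");
    }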
@@ -1464,8 +1554,8 @@ impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubK
         },
     }
 )]
-#[serde(rename_all="kebab-case")]
 #[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all="kebab-case")]
 /// Job Scheduling Status
 pub struct JobScheduleStatus {
     #[serde(skip_serializing_if="Option::is_none")]
@@ -1477,3 +1567,109 @@ pub struct JobScheduleStatus {
     #[serde(skip_serializing_if="Option::is_none")]
     pub last_run_endtime: Option<i64>,
 }
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node memory usage counters
+pub struct NodeMemoryCounters {
+    /// Total memory
+    pub total: u64,
+    /// Used memory
+    pub used: u64,
+    /// Free memory
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node swap usage counters
+pub struct NodeSwapCounters {
+    /// Total swap
+    pub total: u64,
+    /// Used swap
+    pub used: u64,
+    /// Free swap
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all = "kebab-case")]
+/// Contains general node information such as the fingerprint
+pub struct NodeInformation {
+    /// The SSL Fingerprint
+    pub fingerprint: String,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Information about the CPU
+pub struct NodeCpuInformation {
+    /// The CPU model
+    pub model: String,
+    /// The number of CPU sockets
+    pub sockets: usize,
+    /// The number of CPU cores (incl. threads)
+    pub cpus: usize,
+}
+
+#[api(
+    properties: {
+        memory: {
+            type: NodeMemoryCounters,
+        },
+        root: {
+            type: StorageStatus,
+        },
+        swap: {
+            type: NodeSwapCounters,
+        },
+        loadavg: {
+            type: Array,
+            items: {
+                type: Number,
+                description: "the load",
+            }
+        },
+        cpuinfo: {
+            type: NodeCpuInformation,
+        },
+        info: {
+            type: NodeInformation,
+        }
+    },
+)]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// The Node status
+pub struct NodeStatus {
+    pub memory: NodeMemoryCounters,
+    pub root: StorageStatus,
+    pub swap: NodeSwapCounters,
+    /// The current uptime of the server.
+    pub uptime: u64,
+    /// Load for 1, 5 and 15 minutes.
+    pub loadavg: [f64; 3],
+    /// The current kernel version.
+    pub kversion: String,
+    /// Total CPU usage since last query.
+    pub cpu: f64,
+    /// Total IO wait since last query.
+    pub wait: f64,
+    pub cpuinfo: NodeCpuInformation,
+    pub info: NodeInformation,
+}
+
+pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
+    "HTTP proxy configuration [http://]<host>[:port]")
+    .format(&ApiStringFormat::VerifyFn(|s| {
+        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+        Ok(())
+    }))
+    .min_length(1)
+    .max_length(128)
+    .type_text("[http://]<host>[:port]")
+    .schema();
@@ -21,8 +21,8 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
     .max_length(32)
     .schema();

-pub const LINUX_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
-    "The path to a LINUX non-rewinding SCSI tape device (i.e. '/dev/nst0')")
+pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
+    "The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')")
     .schema();

 pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
@@ -57,7 +57,7 @@ pub struct VirtualTapeDrive {
             schema: DRIVE_NAME_SCHEMA,
         },
         path: {
-            schema: LINUX_DRIVE_PATH_SCHEMA,
+            schema: LTO_DRIVE_PATH_SCHEMA,
         },
         changer: {
             schema: CHANGER_NAME_SCHEMA,
@@ -71,8 +71,8 @@ pub struct VirtualTapeDrive {
 )]
 #[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Linux SCSI tape driver
-pub struct LinuxTapeDrive {
+/// Lto SCSI tape driver
+pub struct LtoTapeDrive {
     pub name: String,
     pub path: String,
     #[serde(skip_serializing_if="Option::is_none")]
@@ -84,7 +84,7 @@ pub struct LinuxTapeDrive {
 #[api(
     properties: {
         config: {
-            type: LinuxTapeDrive,
+            type: LtoTapeDrive,
         },
         info: {
             type: OptionalDeviceIdentification,
@@ -96,7 +96,7 @@ pub struct LinuxTapeDrive {
 /// Drive list entry
 pub struct DriveListEntry {
     #[serde(flatten)]
-    pub config: LinuxTapeDrive,
+    pub config: LtoTapeDrive,
     #[serde(flatten)]
     pub info: OptionalDeviceIdentification,
     /// the state of the drive if locked
@@ -119,6 +119,8 @@ pub struct MamAttribute {
 #[api()]
 #[derive(Serialize,Deserialize,Copy,Clone,Debug)]
 pub enum TapeDensity {
+    /// Unknown (no media loaded)
+    Unknown,
     /// LTO1
     LTO1,
     /// LTO2
@@ -144,6 +146,7 @@ impl TryFrom<u8> for TapeDensity {

     fn try_from(value: u8) -> Result<Self, Self::Error> {
         let density = match value {
+            0x00 => TapeDensity::Unknown,
             0x40 => TapeDensity::LTO1,
             0x42 => TapeDensity::LTO2,
             0x44 => TapeDensity::LTO3,
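
The new Unknown variant above maps SCSI density code 0x00, which a drive reports when no medium is loaded, so an empty drive no longer has to be treated as an error. A self-contained sketch of the same TryFrom<u8> mapping, trimmed to a few generations:

    use std::convert::TryFrom;

    // Trimmed stand-in; the real enum covers LTO1 and upward, with the
    // density codes taken from the SCSI spec.
    #[derive(Debug, PartialEq)]
    enum TapeDensity { Unknown, Lto1, Lto2 }

    impl TryFrom<u8> for TapeDensity {
        type Error = String;
        fn try_from(value: u8) -> Result<Self, Self::Error> {
            Ok(match value {
                0x00 => TapeDensity::Unknown, // no media loaded
                0x40 => TapeDensity::Lto1,
                0x42 => TapeDensity::Lto2,
                other => return Err(format!("unknown density code 0x{:02x}", other)),
            })
        }
    }

    fn main() {
        assert_eq!(TapeDensity::try_from(0x00).unwrap(), TapeDensity::Unknown);
        assert!(TapeDensity::try_from(0x99).is_err());
    }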
@@ -169,29 +172,37 @@ impl TryFrom<u8> for TapeDensity {
 )]
 #[derive(Serialize,Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Drive/Media status for Linux SCSI drives.
+/// Drive/Media status for Lto SCSI drives.
 ///
 /// Media related data is optional - only set if there is a medium
 /// loaded.
-pub struct LinuxDriveAndMediaStatus {
+pub struct LtoDriveAndMediaStatus {
+    /// Vendor
+    pub vendor: String,
+    /// Product
+    pub product: String,
+    /// Revision
+    pub revision: String,
     /// Block size (0 is variable size)
     pub blocksize: u32,
+    /// Compression enabled
+    pub compression: bool,
+    /// Drive buffer mode
+    pub buffer_mode: u8,
     /// Tape density
+    pub density: TapeDensity,
+    /// Media is write protected
     #[serde(skip_serializing_if="Option::is_none")]
-    pub density: Option<TapeDensity>,
-    /// Status flags
-    pub status: String,
-    /// Linux Driver Options
-    pub options: String,
+    pub write_protect: Option<bool>,
     /// Tape Alert Flags
     #[serde(skip_serializing_if="Option::is_none")]
     pub alert_flags: Option<String>,
     /// Current file number
     #[serde(skip_serializing_if="Option::is_none")]
-    pub file_number: Option<u32>,
+    pub file_number: Option<u64>,
     /// Current block number
     #[serde(skip_serializing_if="Option::is_none")]
-    pub block_number: Option<u32>,
+    pub block_number: Option<u64>,
     /// Medium Manufacture Date (epoch)
     #[serde(skip_serializing_if="Option::is_none")]
     pub manufactured: Option<i64>,
@@ -212,3 +223,62 @@ pub struct LinuxDriveAndMediaStatus {
     #[serde(skip_serializing_if="Option::is_none")]
     pub medium_wearout: Option<f64>,
 }
+
+#[api()]
+/// Volume statistics from SCSI log page 17h
+#[derive(Default, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct Lp17VolumeStatistics {
+    /// Volume mounts (thread count)
+    pub volume_mounts: u64,
+    /// Total data sets written
+    pub volume_datasets_written: u64,
+    /// Write retries
+    pub volume_recovered_write_data_errors: u64,
+    /// Total unrecovered write errors
+    pub volume_unrecovered_write_data_errors: u64,
+    /// Total suspended writes
+    pub volume_write_servo_errors: u64,
+    /// Total fatal suspended writes
+    pub volume_unrecovered_write_servo_errors: u64,
+    /// Total datasets read
+    pub volume_datasets_read: u64,
+    /// Total read retries
+    pub volume_recovered_read_errors: u64,
+    /// Total unrecovered read errors
+    pub volume_unrecovered_read_errors: u64,
+    /// Last mount unrecovered write errors
+    pub last_mount_unrecovered_write_errors: u64,
+    /// Last mount unrecovered read errors
+    pub last_mount_unrecovered_read_errors: u64,
+    /// Last mount bytes written
+    pub last_mount_bytes_written: u64,
+    /// Last mount bytes read
+    pub last_mount_bytes_read: u64,
+    /// Lifetime bytes written
+    pub lifetime_bytes_written: u64,
+    /// Lifetime bytes read
+    pub lifetime_bytes_read: u64,
+    /// Last load write compression ratio
+    pub last_load_write_compression_ratio: u64,
+    /// Last load read compression ratio
+    pub last_load_read_compression_ratio: u64,
+    /// Medium mount time
+    pub medium_mount_time: u64,
+    /// Medium ready time
+    pub medium_ready_time: u64,
+    /// Total native capacity
+    pub total_native_capacity: u64,
+    /// Total used native capacity
+    pub total_used_native_capacity: u64,
+    /// Write protect
+    pub write_protect: bool,
+    /// Volume is WORM
+    pub worm: bool,
+    /// Beginning of medium passes
+    pub beginning_of_medium_passes: u64,
+    /// Middle of medium passes
+    pub middle_of_tape_passes: u64,
+    /// Volume serial number
+    pub serial: String,
+}
@@ -12,6 +12,26 @@ use crate::api2::types::{
     MediaLocation,
 };

+#[api(
+    properties: {
+        "media-set-uuid": {
+            schema: MEDIA_SET_UUID_SCHEMA,
+        },
+    },
+)]
+#[derive(Serialize,Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Media Set list entry
+pub struct MediaSetListEntry {
+    /// Media set name
+    pub media_set_name: String,
+    pub media_set_uuid: Uuid,
+    /// MediaSet creation time stamp
+    pub media_set_ctime: i64,
+    /// Media Pool
+    pub pool: String,
+}
+
 #[api(
     properties: {
         location: {
@@ -144,6 +164,8 @@ pub struct MediaContentEntry {
     pub seq_nr: u64,
     /// Media Pool
     pub pool: String,
+    /// Datastore Name
+    pub store: String,
     /// Backup snapshot
     pub snapshot: String,
     /// Snapshot creation time (epoch)
@@ -13,7 +13,7 @@ pub const PROXMOX_PKG_VERSION: &str =
     env!("CARGO_PKG_VERSION_MINOR"),
 );
 pub const PROXMOX_PKG_RELEASE: &str = env!("CARGO_PKG_VERSION_PATCH");
-pub const PROXMOX_PKG_REPOID: &str = env!("CARGO_PKG_REPOSITORY");
+pub const PROXMOX_PKG_REPOID: &str = env!("REPOID");

 fn get_version(
     _param: Value,
src/auth.rs (24 changed lines)
@@ -14,6 +14,7 @@ use crate::api2::types::{Userid, UsernameRef, RealmRef};
 pub trait ProxmoxAuthenticator {
     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
     fn store_password(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
+    fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>;
 }

 pub struct PAM();
@@ -60,6 +61,11 @@ impl ProxmoxAuthenticator for PAM {

         Ok(())
     }
+
+    // do not remove password for pam users
+    fn remove_password(&self, _username: &UsernameRef) -> Result<(), Error> {
+        Ok(())
+    }
 }

 pub struct PBS();
@@ -132,6 +138,24 @@ impl ProxmoxAuthenticator for PBS {

         Ok(())
     }
+
+    fn remove_password(&self, username: &UsernameRef) -> Result<(), Error> {
+        let mut data = proxmox::tools::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
+        if let Some(map) = data.as_object_mut() {
+            map.remove(username.as_str());
+        }
+
+        let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
+        let options = proxmox::tools::fs::CreateOptions::new()
+            .perm(mode)
+            .owner(nix::unistd::ROOT)
+            .group(nix::unistd::Gid::from_raw(0));
+
+        let data = serde_json::to_vec_pretty(&data)?;
+        proxmox::tools::fs::replace_file(SHADOW_CONFIG_FILENAME, &data, options)?;
+
+        Ok(())
+    }
 }

 /// Lookup the autenticator for the specified realm
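
The PBS remove_password implementation above drops the user's entry from the shadow JSON map and rewrites the file with root-only (0600) permissions. A stand-alone sketch of the same remove-and-rewrite pattern using plain std and serde_json instead of the proxmox fs helpers; the path is a placeholder:

    use std::fs;

    // Illustrative only: remove one key from a JSON map file and write it back.
    fn remove_password(username: &str) -> Result<(), Box<dyn std::error::Error>> {
        let path = "/tmp/shadow.json"; // stand-in for the real shadow config
        let mut data: serde_json::Value = match fs::read(path) {
            Ok(raw) => serde_json::from_slice(&raw)?,
            Err(_) => serde_json::json!({}), // start empty if the file is missing
        };
        if let Some(map) = data.as_object_mut() {
            map.remove(username); // drop the user's password hash entry
        }
        // the real code also enforces 0600 root:root and replaces the file
        // atomically; a plain write is enough for this sketch
        fs::write(path, serde_json::to_vec_pretty(&data)?)?;
        Ok(())
    }

    fn main() {
        remove_password("alice").unwrap();
    }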
@@ -238,6 +238,7 @@ pub use fixed_index::*;
 mod dynamic_index;
 pub use dynamic_index::*;

+#[macro_use]
 mod backup_info;
 pub use backup_info::*;

@@ -256,5 +257,5 @@ pub use verify::*;
 mod catalog_shell;
 pub use catalog_shell::*;

-mod async_index_reader;
-pub use async_index_reader::*;
+mod cached_chunk_reader;
+pub use cached_chunk_reader::*;
@@ -1,215 +0,0 @@
-use std::future::Future;
-use std::task::{Poll, Context};
-use std::pin::Pin;
-use std::io::SeekFrom;
-
-use anyhow::Error;
-use futures::future::FutureExt;
-use futures::ready;
-use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};
-
-use proxmox::sys::error::io_err_other;
-use proxmox::io_format_err;
-
-use super::IndexFile;
-use super::read_chunk::AsyncReadChunk;
-use super::index::ChunkReadInfo;
-
-type ReadFuture<S> = dyn Future<Output = Result<(S, Vec<u8>), Error>> + Send + 'static;
-
-// FIXME: This enum may not be required?
-// - Put the `WaitForData` case directly into a `read_future: Option<>`
-// - make the read loop as follows:
-//   * if read_buffer is not empty:
-//        use it
-//   * else if read_future is there:
-//        poll it
-//        if read: move data to read_buffer
-//   * else
-//        create read future
-#[allow(clippy::enum_variant_names)]
-enum AsyncIndexReaderState<S> {
-    NoData,
-    WaitForData(Pin<Box<ReadFuture<S>>>),
-    HaveData,
-}
-
-pub struct AsyncIndexReader<S, I: IndexFile> {
-    store: Option<S>,
-    index: I,
-    read_buffer: Vec<u8>,
-    current_chunk_offset: u64,
-    current_chunk_idx: usize,
-    current_chunk_info: Option<ChunkReadInfo>,
-    position: u64,
-    seek_to_pos: i64,
-    state: AsyncIndexReaderState<S>,
-}
-
-// ok because the only public interfaces operates on &mut Self
-unsafe impl<S: Sync, I: IndexFile + Sync> Sync for AsyncIndexReader<S, I> {}
-
-impl<S: AsyncReadChunk, I: IndexFile> AsyncIndexReader<S, I> {
-    pub fn new(index: I, store: S) -> Self {
-        Self {
-            store: Some(store),
-            index,
-            read_buffer: Vec::with_capacity(1024 * 1024),
-            current_chunk_offset: 0,
-            current_chunk_idx: 0,
-            current_chunk_info: None,
-            position: 0,
-            seek_to_pos: 0,
-            state: AsyncIndexReaderState::NoData,
-        }
-    }
-}
-
-impl<S, I> AsyncRead for AsyncIndexReader<S, I>
-where
-    S: AsyncReadChunk + Unpin + Sync + 'static,
-    I: IndexFile + Unpin,
-{
-    fn poll_read(
-        self: Pin<&mut Self>,
-        cx: &mut Context,
-        buf: &mut ReadBuf,
-    ) -> Poll<tokio::io::Result<()>> {
-        let this = Pin::get_mut(self);
-        loop {
-            match &mut this.state {
-                AsyncIndexReaderState::NoData => {
-                    let (idx, offset) = if this.current_chunk_info.is_some() &&
-                        this.position == this.current_chunk_info.as_ref().unwrap().range.end
-                    {
-                        // optimization for sequential chunk read
-                        let next_idx = this.current_chunk_idx + 1;
-                        (next_idx, 0)
-                    } else {
-                        match this.index.chunk_from_offset(this.position) {
-                            Some(res) => res,
-                            None => return Poll::Ready(Ok(()))
-                        }
-                    };
-
-                    if idx >= this.index.index_count() {
-                        return Poll::Ready(Ok(()));
-                    }
-
-                    let info = this
-                        .index
-                        .chunk_info(idx)
-                        .ok_or_else(|| io_format_err!("could not get digest"))?;
-
-                    this.current_chunk_offset = offset;
-                    this.current_chunk_idx = idx;
-                    let old_info = this.current_chunk_info.replace(info.clone());
-
-                    if let Some(old_info) = old_info {
-                        if old_info.digest == info.digest {
-                            // hit, chunk is currently in cache
-                            this.state = AsyncIndexReaderState::HaveData;
-                            continue;
-                        }
-                    }
-
-                    // miss, need to download new chunk
-                    let store = match this.store.take() {
-                        Some(store) => store,
-                        None => {
-                            return Poll::Ready(Err(io_format_err!("could not find store")));
-                        }
-                    };
-
-                    let future = async move {
-                        store.read_chunk(&info.digest)
-                            .await
-                            .map(move |x| (store, x))
-                    };
-
-                    this.state = AsyncIndexReaderState::WaitForData(future.boxed());
-                }
-                AsyncIndexReaderState::WaitForData(ref mut future) => {
-                    match ready!(future.as_mut().poll(cx)) {
-                        Ok((store, chunk_data)) => {
-                            this.read_buffer = chunk_data;
-                            this.state = AsyncIndexReaderState::HaveData;
-                            this.store = Some(store);
-                        }
-                        Err(err) => {
-                            return Poll::Ready(Err(io_err_other(err)));
-                        }
-                    };
-                }
-                AsyncIndexReaderState::HaveData => {
-                    let offset = this.current_chunk_offset as usize;
-                    let len = this.read_buffer.len();
-                    let n = if len - offset < buf.remaining() {
-                        len - offset
-                    } else {
-                        buf.remaining()
-                    };
-
-                    buf.put_slice(&this.read_buffer[offset..(offset + n)]);
-                    this.position += n as u64;
-
-                    if offset + n == len {
-                        this.state = AsyncIndexReaderState::NoData;
-                    } else {
-                        this.current_chunk_offset += n as u64;
-                        this.state = AsyncIndexReaderState::HaveData;
-                    }
-
-                    return Poll::Ready(Ok(()));
-                }
-            }
-        }
-    }
-}
-
-impl<S, I> AsyncSeek for AsyncIndexReader<S, I>
-where
-    S: AsyncReadChunk + Unpin + Sync + 'static,
-    I: IndexFile + Unpin,
-{
-    fn start_seek(
-        self: Pin<&mut Self>,
-        pos: SeekFrom,
-    ) -> tokio::io::Result<()> {
-        let this = Pin::get_mut(self);
-        this.seek_to_pos = match pos {
-            SeekFrom::Start(offset) => {
-                offset as i64
-            },
-            SeekFrom::End(offset) => {
-                this.index.index_bytes() as i64 + offset
-            },
-            SeekFrom::Current(offset) => {
-                this.position as i64 + offset
-            }
-        };
-        Ok(())
-    }
-
-    fn poll_complete(
-        self: Pin<&mut Self>,
-        _cx: &mut Context<'_>,
-    ) -> Poll<tokio::io::Result<u64>> {
-        let this = Pin::get_mut(self);
-
-        let index_bytes = this.index.index_bytes();
-        if this.seek_to_pos < 0 {
-            return Poll::Ready(Err(io_format_err!("cannot seek to negative values")));
-        } else if this.seek_to_pos > index_bytes as i64 {
-            this.position = index_bytes;
-        } else {
-            this.position = this.seek_to_pos as u64;
-        }
-
-        // even if seeking within one chunk, we need to go to NoData to
-        // recalculate the current_chunk_offset (data is cached anyway)
-        this.state = AsyncIndexReaderState::NoData;
-
-        Poll::Ready(Ok(this.position))
-    }
-}
@ -3,31 +3,19 @@ use crate::tools;
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use std::os::unix::io::RawFd;
|
use std::os::unix::io::RawFd;
|
||||||
|
|
||||||
use std::path::{PathBuf, Path};
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
use proxmox::const_regex;
|
use crate::api2::types::{
|
||||||
|
BACKUP_ID_REGEX,
|
||||||
|
BACKUP_TYPE_REGEX,
|
||||||
|
BACKUP_DATE_REGEX,
|
||||||
|
GROUP_PATH_REGEX,
|
||||||
|
SNAPSHOT_PATH_REGEX,
|
||||||
|
BACKUP_FILE_REGEX,
|
||||||
|
};
|
||||||
|
|
||||||
use super::manifest::MANIFEST_BLOB_NAME;
|
use super::manifest::MANIFEST_BLOB_NAME;
|
||||||
|
|
||||||
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
|
|
||||||
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
|
|
||||||
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
|
|
||||||
|
|
||||||
const_regex!{
|
|
||||||
BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
|
|
||||||
|
|
||||||
BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
|
|
||||||
|
|
||||||
pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
|
|
||||||
|
|
||||||
BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
|
|
||||||
|
|
||||||
GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
|
|
||||||
|
|
||||||
SNAPSHOT_PATH_REGEX = concat!(
|
|
||||||
r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")$");
|
|
||||||
}
|
|
||||||
|
|
||||||
/// BackupGroup is a directory containing a list of BackupDir
|
/// BackupGroup is a directory containing a list of BackupDir
|
||||||
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
|
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
|
||||||
pub struct BackupGroup {
|
pub struct BackupGroup {
|
||||||
@ -38,7 +26,6 @@ pub struct BackupGroup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl std::cmp::Ord for BackupGroup {
|
impl std::cmp::Ord for BackupGroup {
|
||||||
|
|
||||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||||
let type_order = self.backup_type.cmp(&other.backup_type);
|
let type_order = self.backup_type.cmp(&other.backup_type);
|
||||||
if type_order != std::cmp::Ordering::Equal {
|
if type_order != std::cmp::Ordering::Equal {
|
||||||
@ -63,9 +50,11 @@ impl std::cmp::PartialOrd for BackupGroup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl BackupGroup {
|
impl BackupGroup {
|
||||||
|
|
||||||
pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
|
pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
|
||||||
Self { backup_type: backup_type.into(), backup_id: backup_id.into() }
|
Self {
|
||||||
|
backup_type: backup_type.into(),
|
||||||
|
backup_id: backup_id.into(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn backup_type(&self) -> &str {
|
pub fn backup_type(&self) -> &str {
|
||||||
@ -77,7 +66,6 @@ impl BackupGroup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn group_path(&self) -> PathBuf {
|
pub fn group_path(&self) -> PathBuf {
|
||||||
|
|
||||||
let mut relative_path = PathBuf::new();
|
let mut relative_path = PathBuf::new();
|
||||||
|
|
||||||
relative_path.push(&self.backup_type);
|
relative_path.push(&self.backup_type);
|
||||||
@ -88,46 +76,65 @@ impl BackupGroup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
|
pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
|
||||||
|
|
||||||
let mut list = vec![];
|
let mut list = vec![];
|
||||||
|
|
||||||
let mut path = base_path.to_owned();
|
let mut path = base_path.to_owned();
|
||||||
path.push(self.group_path());
|
path.push(self.group_path());
|
||||||
|
|
||||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
tools::scandir(
|
||||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
libc::AT_FDCWD,
|
||||||
|
&path,
|
||||||
|
&BACKUP_DATE_REGEX,
|
||||||
|
|l2_fd, backup_time, file_type| {
|
||||||
|
if file_type != nix::dir::Type::Directory {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
let backup_dir = BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
let backup_dir =
|
||||||
|
BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
|
||||||
let files = list_backup_files(l2_fd, backup_time)?;
|
let files = list_backup_files(l2_fd, backup_time)?;
|
||||||
|
|
||||||
list.push(BackupInfo { backup_dir, files });
|
list.push(BackupInfo { backup_dir, files });
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
},
|
||||||
|
)?;
|
||||||
Ok(list)
|
Ok(list)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
|
||||||
|
|
||||||
let mut last = None;
|
let mut last = None;
|
||||||
|
|
||||||
let mut path = base_path.to_owned();
|
let mut path = base_path.to_owned();
|
||||||
path.push(self.group_path());
|
path.push(self.group_path());
|
||||||
|
|
||||||
tools::scandir(libc::AT_FDCWD, &path, &BACKUP_DATE_REGEX, |l2_fd, backup_time, file_type| {
|
tools::scandir(
|
||||||
if file_type != nix::dir::Type::Directory { return Ok(()); }
|
libc::AT_FDCWD,
|
||||||
|
&path,
|
||||||
|
&BACKUP_DATE_REGEX,
|
||||||
|
|l2_fd, backup_time, file_type| {
|
||||||
|
if file_type != nix::dir::Type::Directory {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
let mut manifest_path = PathBuf::from(backup_time);
|
let mut manifest_path = PathBuf::from(backup_time);
|
||||||
manifest_path.push(MANIFEST_BLOB_NAME);
|
manifest_path.push(MANIFEST_BLOB_NAME);
|
||||||
|
|
||||||
use nix::fcntl::{openat, OFlag};
|
use nix::fcntl::{openat, OFlag};
|
||||||
match openat(l2_fd, &manifest_path, OFlag::O_RDONLY, nix::sys::stat::Mode::empty()) {
|
match openat(
|
||||||
|
l2_fd,
|
||||||
|
&manifest_path,
|
||||||
|
OFlag::O_RDONLY,
|
||||||
|
nix::sys::stat::Mode::empty(),
|
||||||
|
) {
|
||||||
Ok(rawfd) => {
|
Ok(rawfd) => {
|
||||||
/* manifest exists --> assume backup was successful */
|
/* manifest exists --> assume backup was successful */
|
||||||
/* close else this leaks! */
|
/* close else this leaks! */
|
||||||
nix::unistd::close(rawfd)?;
|
nix::unistd::close(rawfd)?;
|
||||||
},
|
}
|
||||||
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => { return Ok(()); }
|
Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
bail!("last_successful_backup: unexpected error - {}", err);
|
bail!("last_successful_backup: unexpected error - {}", err);
|
||||||
}
|
}
|
||||||
@ -135,13 +142,16 @@ impl BackupGroup {
|
|||||||
|
|
||||||
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
|
||||||
if let Some(last_timestamp) = last {
|
if let Some(last_timestamp) = last {
|
||||||
if timestamp > last_timestamp { last = Some(timestamp); }
|
if timestamp > last_timestamp {
|
||||||
|
last = Some(timestamp);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
last = Some(timestamp);
|
last = Some(timestamp);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
Ok(last)
|
Ok(last)
|
||||||
}
|
}
|
||||||
@ -162,7 +172,8 @@ impl std::str::FromStr for BackupGroup {
|
|||||||
///
|
///
|
||||||
/// This parses strings like `vm/100".
|
/// This parses strings like `vm/100".
|
||||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||||
let cap = GROUP_PATH_REGEX.captures(path)
|
let cap = GROUP_PATH_REGEX
|
||||||
|
.captures(path)
|
||||||
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
@@ -182,11 +193,10 @@ pub struct BackupDir {
     /// Backup timestamp
     backup_time: i64,
     // backup_time as rfc3339
-    backup_time_string: String
+    backup_time_string: String,
 }

 impl BackupDir {

     pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
     where
         T: Into<String>,
@@ -196,7 +206,11 @@ impl BackupDir {
         BackupDir::with_group(group, backup_time)
     }

-    pub fn with_rfc3339<T,U,V>(backup_type: T, backup_id: U, backup_time_string: V) -> Result<Self, Error>
+    pub fn with_rfc3339<T, U, V>(
+        backup_type: T,
+        backup_id: U,
+        backup_time_string: V,
+    ) -> Result<Self, Error>
     where
         T: Into<String>,
         U: Into<String>,
@@ -205,12 +219,20 @@ impl BackupDir {
         let backup_time_string = backup_time_string.into();
         let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
         let group = BackupGroup::new(backup_type.into(), backup_id.into());
-        Ok(Self { group, backup_time, backup_time_string })
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
     }

     pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
         let backup_time_string = Self::backup_time_to_string(backup_time)?;
-        Ok(Self { group, backup_time, backup_time_string })
+        Ok(Self {
+            group,
+            backup_time,
+            backup_time_string,
+        })
     }

     pub fn group(&self) -> &BackupGroup {
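
Both constructors uphold the same invariant: the timestamp is stored in numeric and string form, each derivable from the other. A self-contained stand-in (plain seconds instead of RFC 3339, so the formatting is simplified):

    struct Snapshot {
        backup_time: i64,
        backup_time_string: String,
    }

    impl Snapshot {
        /// Like with_group: derive the string form from the number.
        fn from_epoch(backup_time: i64) -> Self {
            Self { backup_time_string: backup_time.to_string(), backup_time }
        }

        /// Like with_rfc3339: parse the number out of the string form.
        fn from_string(s: &str) -> Result<Self, std::num::ParseIntError> {
            Ok(Self { backup_time: s.parse()?, backup_time_string: s.to_string() })
        }
    }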
@@ -226,7 +248,6 @@ impl BackupDir {
     }

     pub fn relative_path(&self) -> PathBuf {
-
         let mut relative_path = self.group.group_path();

         relative_path.push(self.backup_time_string.clone());
@@ -247,7 +268,8 @@ impl std::str::FromStr for BackupDir {
     ///
     /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
     fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = SNAPSHOT_PATH_REGEX.captures(path)
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

         BackupDir::with_rfc3339(
@@ -276,7 +298,6 @@ pub struct BackupInfo {
 }

 impl BackupInfo {
-
     pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
         let mut path = base_path.to_owned();
         path.push(backup_dir.relative_path());
@@ -287,19 +308,24 @@ impl BackupInfo {
     }

     /// Finds the latest backup inside a backup group
-    pub fn last_backup(base_path: &Path, group: &BackupGroup, only_finished: bool)
-        -> Result<Option<BackupInfo>, Error>
-    {
+    pub fn last_backup(
+        base_path: &Path,
+        group: &BackupGroup,
+        only_finished: bool,
+    ) -> Result<Option<BackupInfo>, Error> {
         let backups = group.list_backups(base_path)?;
-        Ok(backups.into_iter()
+        Ok(backups
+            .into_iter()
             .filter(|item| !only_finished || item.is_finished())
             .max_by_key(|item| item.backup_dir.backup_time()))
     }

     pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
-        if ascendending { // oldest first
+        if ascendending {
+            // oldest first
             list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
-        } else { // newest first
+        } else {
+            // newest first
             list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
         }
     }
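
The filter/max_by_key chain in last_backup is a generic "latest matching element" query; a std-only sketch:

    struct Item { time: i64, finished: bool }

    /// Latest finished item, or latest overall when only_finished is false.
    fn last_item(items: Vec<Item>, only_finished: bool) -> Option<Item> {
        items
            .into_iter()
            .filter(|item| !only_finished || item.finished)
            .max_by_key(|item| item.time)
    }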
@@ -316,31 +342,52 @@ impl BackupInfo {
     pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
         let mut list = Vec::new();

-        tools::scandir(libc::AT_FDCWD, base_path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
-            if file_type != nix::dir::Type::Directory { return Ok(()); }
-            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |_, backup_id, file_type| {
-                if file_type != nix::dir::Type::Directory { return Ok(()); }
+        tools::scandir(
+            libc::AT_FDCWD,
+            base_path,
+            &BACKUP_TYPE_REGEX,
+            |l0_fd, backup_type, file_type| {
+                if file_type != nix::dir::Type::Directory {
+                    return Ok(());
+                }
+                tools::scandir(
+                    l0_fd,
+                    backup_type,
+                    &BACKUP_ID_REGEX,
+                    |_, backup_id, file_type| {
+                        if file_type != nix::dir::Type::Directory {
+                            return Ok(());
+                        }

                         list.push(BackupGroup::new(backup_type, backup_id));

                         Ok(())
-            })
-        })?;
+                    },
+                )
+            },
+        )?;

         Ok(list)
     }

     pub fn is_finished(&self) -> bool {
         // backup is considered unfinished if there is no manifest
-        self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+        self.files
+            .iter()
+            .any(|name| name == super::MANIFEST_BLOB_NAME)
     }
 }

-fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {
+fn list_backup_files<P: ?Sized + nix::NixPath>(
+    dirfd: RawFd,
+    path: &P,
+) -> Result<Vec<String>, Error> {
     let mut files = vec![];

     tools::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
-        if file_type != nix::dir::Type::File { return Ok(()); }
+        if file_type != nix::dir::Type::File {
+            return Ok(());
+        }
         files.push(filename.to_owned());
         Ok(())
     })?;
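
A std-only sketch of the same two-level walk (type directory, then id directory) that list_backup_groups performs, without the fd-based scandir helper or the regex filters of the real code:

    use std::{fs, io, path::Path};

    fn list_groups(base: &Path) -> io::Result<Vec<(String, String)>> {
        let mut list = Vec::new();
        for ty in fs::read_dir(base)? {
            let ty = ty?;
            if !ty.file_type()?.is_dir() { continue; }
            for id in fs::read_dir(ty.path())? {
                let id = id?;
                if !id.file_type()?.is_dir() { continue; }
                list.push((
                    ty.file_name().to_string_lossy().into_owned(),
                    id.file_name().to_string_lossy().into_owned(),
                ));
            }
        }
        Ok(list)
    }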
src/backup/cached_chunk_reader.rs (new file, 189 additions)
@@ -0,0 +1,189 @@
+//! An async and concurrency safe data reader backed by a local LRU cache.
+
+use anyhow::Error;
+use futures::future::Future;
+use futures::ready;
+use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};
+
+use std::io::SeekFrom;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+use super::{AsyncReadChunk, IndexFile};
+use crate::tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
+use proxmox::io_format_err;
+use proxmox::sys::error::io_err_other;
+
+struct AsyncChunkCacher<T> {
+    reader: Arc<T>,
+}
+
+impl<T: AsyncReadChunk + Send + Sync + 'static> AsyncCacher<[u8; 32], Arc<Vec<u8>>>
+    for AsyncChunkCacher<T>
+{
+    fn fetch(
+        &self,
+        key: [u8; 32],
+    ) -> Box<dyn Future<Output = Result<Option<Arc<Vec<u8>>>, Error>> + Send> {
+        let reader = Arc::clone(&self.reader);
+        Box::new(async move {
+            AsyncReadChunk::read_chunk(reader.as_ref(), &key)
+                .await
+                .map(|x| Some(Arc::new(x)))
+        })
+    }
+}
+
+/// Allows arbitrary data reads from an Index via an AsyncReadChunk implementation, using an LRU
+/// cache internally to cache chunks and provide support for multiple concurrent reads (potentially
+/// to the same chunk).
+pub struct CachedChunkReader<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> {
+    cache: Arc<AsyncLruCache<[u8; 32], Arc<Vec<u8>>>>,
+    cacher: AsyncChunkCacher<R>,
+    index: I,
+}
+
+impl<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> CachedChunkReader<I, R> {
+    /// Create a new reader with a local LRU cache containing 'capacity' chunks.
+    pub fn new(reader: R, index: I, capacity: usize) -> Self {
+        let cache = Arc::new(AsyncLruCache::new(capacity));
+        Self::new_with_cache(reader, index, cache)
+    }
+
+    /// Create a new reader with a custom LRU cache. Use this to share a cache between multiple
+    /// readers.
+    pub fn new_with_cache(
+        reader: R,
+        index: I,
+        cache: Arc<AsyncLruCache<[u8; 32], Arc<Vec<u8>>>>,
+    ) -> Self {
+        Self {
+            cache,
+            cacher: AsyncChunkCacher {
+                reader: Arc::new(reader),
+            },
+            index,
+        }
+    }
+
+    /// Read data at a given byte offset into a variable size buffer. Returns the amount of bytes
+    /// read, which will always be the size of the buffer except when reaching EOF.
+    pub async fn read_at(&self, buf: &mut [u8], offset: u64) -> Result<usize, Error> {
+        let size = buf.len();
+        let mut read: usize = 0;
+        while read < size {
+            let cur_offset = offset + read as u64;
+            if let Some(chunk) = self.index.chunk_from_offset(cur_offset) {
+                // chunk indices retrieved from chunk_from_offset always resolve to Some(_)
+                let info = self.index.chunk_info(chunk.0).unwrap();
+
+                // will never be None, see AsyncChunkCacher
+                let data = self.cache.access(info.digest, &self.cacher).await?.unwrap();
+
+                let want_bytes = ((info.range.end - cur_offset) as usize).min(size - read);
+                let slice = &mut buf[read..(read + want_bytes)];
+                let intra_chunk = chunk.1 as usize;
+                slice.copy_from_slice(&data[intra_chunk..(intra_chunk + want_bytes)]);
+                read += want_bytes;
+            } else {
+                // EOF
+                break;
+            }
+        }
+        Ok(read)
+    }
+}
+
+impl<I: IndexFile + Send + Sync + 'static, R: AsyncReadChunk + Send + Sync + 'static>
+    CachedChunkReader<I, R>
+{
+    /// Returns a SeekableCachedChunkReader based on this instance, which implements AsyncSeek and
+    /// AsyncRead for use in interfaces which require that. Direct use of read_at is preferred
+    /// otherwise.
+    pub fn seekable(self) -> SeekableCachedChunkReader<I, R> {
+        SeekableCachedChunkReader {
+            index_bytes: self.index.index_bytes(),
+            reader: Arc::new(self),
+            position: 0,
+            read_future: None,
+        }
+    }
+}
+
+pub struct SeekableCachedChunkReader<
+    I: IndexFile + Send + Sync + 'static,
+    R: AsyncReadChunk + Send + Sync + 'static,
+> {
+    reader: Arc<CachedChunkReader<I, R>>,
+    index_bytes: u64,
+    position: u64,
+    read_future: Option<Pin<Box<dyn Future<Output = Result<(Vec<u8>, usize), Error>> + Send>>>,
+}
+
+impl<I, R> AsyncSeek for SeekableCachedChunkReader<I, R>
+where
+    I: IndexFile + Send + Sync + 'static,
+    R: AsyncReadChunk + Send + Sync + 'static,
+{
+    fn start_seek(self: Pin<&mut Self>, pos: SeekFrom) -> tokio::io::Result<()> {
+        let this = Pin::get_mut(self);
+        let seek_to_pos = match pos {
+            SeekFrom::Start(offset) => offset as i64,
+            SeekFrom::End(offset) => this.index_bytes as i64 + offset,
+            SeekFrom::Current(offset) => this.position as i64 + offset,
+        };
+        if seek_to_pos < 0 {
+            return Err(io_format_err!("cannot seek to negative values"));
+        } else if seek_to_pos > this.index_bytes as i64 {
+            this.position = this.index_bytes;
+        } else {
+            this.position = seek_to_pos as u64;
+        }
+        Ok(())
+    }
+
+    fn poll_complete(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<tokio::io::Result<u64>> {
+        Poll::Ready(Ok(self.position))
+    }
+}
+
+impl<I, R> AsyncRead for SeekableCachedChunkReader<I, R>
+where
+    I: IndexFile + Send + Sync + 'static,
+    R: AsyncReadChunk + Send + Sync + 'static,
+{
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context,
+        buf: &mut ReadBuf,
+    ) -> Poll<tokio::io::Result<()>> {
+        let this = Pin::get_mut(self);
+
+        let offset = this.position;
+        let wanted = buf.capacity();
+        let reader = Arc::clone(&this.reader);
+
+        let fut = this.read_future.get_or_insert_with(|| {
+            Box::pin(async move {
+                let mut read_buf = vec![0u8; wanted];
+                let read = reader.read_at(&mut read_buf[..wanted], offset).await?;
+                Ok((read_buf, read))
+            })
+        });
+
+        let ret = match ready!(fut.as_mut().poll(cx)) {
+            Ok((read_buf, read)) => {
+                buf.put_slice(&read_buf[..read]);
+                this.position += read as u64;
+                Ok(())
+            }
+            Err(err) => Err(io_err_other(err)),
+        };

+        // future completed, drop
+        this.read_future = None;
+
+        Poll::Ready(ret)
+    }
+}
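
The heart of read_at is the per-iteration window arithmetic: copy whichever is smaller, the bytes left in the current chunk or the bytes left in the caller's buffer. Extracted as a pure function for illustration:

    /// Bytes to copy from a chunk ending at `range_end` when reading at
    /// `cur_offset` with `remaining` free bytes in the destination buffer.
    fn want_bytes(range_end: u64, cur_offset: u64, remaining: usize) -> usize {
        ((range_end - cur_offset) as usize).min(remaining)
    }

    // e.g. chunk ends at 4096, position 4000, 1000 buffer bytes left:
    // want_bytes(4096, 4000, 1000) == 96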
@@ -19,9 +19,10 @@ use proxmox::tools::fs::{create_path, CreateOptions};
 use pxar::{EntryKind, Metadata};

 use crate::backup::catalog::{self, DirEntryAttribute};
-use crate::pxar::Flags;
 use crate::pxar::fuse::{Accessor, FileEntry};
+use crate::pxar::Flags;
 use crate::tools::runtime::block_in_place;
+use crate::tools::ControlFlow;

 type CatalogReader = crate::backup::CatalogReader<std::fs::File>;

@@ -998,11 +999,6 @@ impl Shell {
     }
 }

-enum LoopState {
-    Break,
-    Continue,
-}
-
 struct ExtractorState<'a> {
     path: Vec<u8>,
     path_len: usize,
@@ -1060,8 +1056,8 @@ impl<'a> ExtractorState<'a> {
             let entry = match self.read_dir.next() {
                 Some(entry) => entry,
                 None => match self.handle_end_of_directory()? {
-                    LoopState::Break => break, // done with root directory
-                    LoopState::Continue => continue,
+                    ControlFlow::Break(()) => break, // done with root directory
+                    ControlFlow::Continue(()) => continue,
                 },
             };
@@ -1079,11 +1075,11 @@ impl<'a> ExtractorState<'a> {
         Ok(())
     }

-    fn handle_end_of_directory(&mut self) -> Result<LoopState, Error> {
+    fn handle_end_of_directory(&mut self) -> Result<ControlFlow<()>, Error> {
         // go up a directory:
         self.read_dir = match self.read_dir_stack.pop() {
             Some(r) => r,
-            None => return Ok(LoopState::Break), // out of root directory
+            None => return Ok(ControlFlow::Break(())), // out of root directory
         };

         self.matches = self
@@ -1102,7 +1098,7 @@ impl<'a> ExtractorState<'a> {

         self.extractor.leave_directory()?;

-        Ok(LoopState::Continue)
+        Ok(ControlFlow::CONTINUE)
     }

     async fn handle_new_directory(
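
The local LoopState enum gives way to a generic ControlFlow from crate::tools (mirroring std::ops::ControlFlow, which was not yet stable at the time). A self-contained sketch of such a type, including the CONTINUE shorthand used above:

    enum ControlFlow<B, C = ()> {
        Continue(C),
        Break(B),
    }

    impl<B> ControlFlow<B> {
        // lets call sites write ControlFlow::CONTINUE instead of Continue(())
        const CONTINUE: ControlFlow<B, ()> = ControlFlow::Continue(());
    }

    fn step(done: bool) -> ControlFlow<()> {
        if done { ControlFlow::Break(()) } else { ControlFlow::CONTINUE }
    }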
@@ -7,6 +7,7 @@ use std::os::unix::io::AsRawFd;

 use proxmox::tools::fs::{CreateOptions, create_path, create_dir};

+use crate::task_log;
 use crate::tools;
 use crate::api2::types::GarbageCollectionStatus;

@@ -61,7 +62,7 @@ impl ChunkStore {
         chunk_dir
     }

-    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
+    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, worker: Option<&dyn TaskState>) -> Result<Self, Error>
     where
         P: Into<PathBuf>,
     {
@@ -104,7 +105,9 @@ impl ChunkStore {
             }
             let percentage = (i*100)/(64*1024);
             if percentage != last_percentage {
-                // eprintln!("ChunkStore::create {}%", percentage);
+                if let Some(worker) = worker {
+                    task_log!(worker, "Chunkstore create: {}%", percentage)
+                }
                 last_percentage = percentage;
             }
         }
@@ -461,7 +464,7 @@ fn test_chunk_store1() {
     assert!(chunk_store.is_err());

     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
-    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None).unwrap();

     let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();

@@ -472,7 +475,7 @@ fn test_chunk_store1() {
     assert!(exists);


-    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid, None);
     assert!(chunk_store.is_err());

     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
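
The create loop only logs when the integer percentage changes, capping output at about 100 lines regardless of how many chunk directories get created; a std-only sketch of that throttle:

    fn create_dirs(total: usize) {
        let mut last_percentage = 0;
        for i in 0..total {
            let percentage = (i * 100) / total; // integer division
            if percentage != last_percentage {
                println!("Chunkstore create: {}%", percentage);
                last_percentage = percentage;
            }
        }
    }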
@@ -69,6 +69,18 @@ impl DataStore {
         Ok(datastore)
     }

+    /// removes all datastores that are not configured anymore
+    pub fn remove_unused_datastores() -> Result<(), Error>{
+        let (config, _digest) = datastore::config()?;
+
+        let mut map = DATASTORE_MAP.lock().unwrap();
+        // removes all elements that are not in the config
+        map.retain(|key, _| {
+            config.sections.contains_key(key)
+        });
+        Ok(())
+    }
+
     fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
         let chunk_store = ChunkStore::open(store_name, path)?;

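
retain is the idiomatic way to drop map entries whose keys no longer appear in a second collection, which is exactly what remove_unused_datastores does against the config; a std-only sketch:

    use std::collections::{HashMap, HashSet};

    fn drop_unconfigured(open: &mut HashMap<String, u64>, configured: &HashSet<String>) {
        // keep only stores that are still present in the configuration
        open.retain(|name, _| configured.contains(name));
    }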
@@ -153,6 +165,34 @@ impl DataStore {
         Ok(out)
     }

+    /// Fast index verification - only check if chunks exist
+    pub fn fast_index_verification(
+        &self,
+        index: &dyn IndexFile,
+        checked: &mut HashSet<[u8;32]>,
+    ) -> Result<(), Error> {
+
+        for pos in 0..index.index_count() {
+            let info = index.chunk_info(pos).unwrap();
+            if checked.contains(&info.digest) {
+                continue;
+            }
+
+            self.stat_chunk(&info.digest)
+                .map_err(|err| {
+                    format_err!(
+                        "fast_index_verification error, stat_chunk {} failed - {}",
+                        proxmox::tools::digest_to_hex(&info.digest),
+                        err,
+                    )
+                })?;
+
+            checked.insert(info.digest);
+        }
+
+        Ok(())
+    }
+
     pub fn name(&self) -> &str {
         self.chunk_store.name()
     }
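
The HashSet threaded through fast_index_verification means a digest shared by many indexes is only stat'ed once; the dedup pattern in isolation:

    use std::collections::HashSet;

    fn check_all(digests: &[[u8; 32]], checked: &mut HashSet<[u8; 32]>) {
        for digest in digests {
            if checked.contains(digest) {
                continue; // already verified via another index
            }
            // existence probe (stat_chunk in the real code) would go here
            checked.insert(*digest);
        }
    }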
@@ -686,6 +726,11 @@ impl DataStore {
     }


+    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
+        let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
+        std::fs::metadata(chunk_path).map_err(Error::from)
+    }
+
     pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

         let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
@@ -781,4 +826,3 @@ impl DataStore {
         self.verify_new
     }
 }
-
@@ -233,6 +233,14 @@ impl IndexFile for DynamicIndexReader {
         })
     }

+    fn index_ctime(&self) -> i64 {
+        self.ctime
+    }
+
+    fn index_size(&self) -> usize {
+        self.size as usize
+    }
+
     fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)> {
         let end_idx = self.index.len() - 1;
         let end = self.chunk_end(end_idx);
@@ -193,6 +193,14 @@ impl IndexFile for FixedIndexReader {
         })
     }

+    fn index_ctime(&self) -> i64 {
+        self.ctime
+    }
+
+    fn index_size(&self) -> usize {
+        self.size as usize
+    }
+
     fn compute_csum(&self) -> ([u8; 32], u64) {
         let mut csum = openssl::sha::Sha256::new();
         let mut chunk_end = 0;
@@ -22,6 +22,8 @@ pub trait IndexFile {
     fn index_digest(&self, pos: usize) -> Option<&[u8; 32]>;
     fn index_bytes(&self) -> u64;
     fn chunk_info(&self, pos: usize) -> Option<ChunkReadInfo>;
+    fn index_ctime(&self) -> i64;
+    fn index_size(&self) -> usize;

     /// Get the chunk index and the relative offset within it for a byte offset
     fn chunk_from_offset(&self, offset: u64) -> Option<(usize, u64)>;
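
Adding the two methods to the trait obliges every implementor to supply them, which is why both index readers gain identical accessors above; the shape in miniature:

    trait IndexFile {
        fn index_ctime(&self) -> i64;
        fn index_size(&self) -> usize;
    }

    struct FixedIndex { ctime: i64, size: u64 }

    impl IndexFile for FixedIndex {
        fn index_ctime(&self) -> i64 { self.ctime }
        fn index_size(&self) -> usize { self.size as usize }
    }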
@@ -33,10 +33,16 @@ impl StoreProgress {

 impl std::fmt::Display for StoreProgress {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let current_group = if self.done_groups < self.total_groups {
+            self.done_groups + 1
+        } else {
+            self.done_groups
+        };
+
         if self.group_snapshots == 0 {
             write!(
                 f,
-                "{:.2}% ({} of {} groups)",
+                "{:.2}% ({}/{} groups)",
                 self.percentage() * 100.0,
                 self.done_groups,
                 self.total_groups,
@@ -44,20 +50,29 @@ impl std::fmt::Display for StoreProgress {
         } else if self.total_groups == 1 {
             write!(
                 f,
-                "{:.2}% ({} of {} snapshots)",
+                "{:.2}% ({}/{} snapshots)",
                 self.percentage() * 100.0,
                 self.done_snapshots,
                 self.group_snapshots,
             )
+        } else if self.done_snapshots == self.group_snapshots {
+            write!(
+                f,
+                "{:.2}% ({}/{} groups)",
+                self.percentage() * 100.0,
+                current_group,
+                self.total_groups,
+            )
         } else {
             write!(
                 f,
-                "{:.2}% ({} of {} groups, {} of {} group snapshots)",
+                "{:.2}% ({}/{} groups, {}/{} snapshots in group #{})",
                 self.percentage() * 100.0,
                 self.done_groups,
                 self.total_groups,
                 self.done_snapshots,
                 self.group_snapshots,
+                current_group,
             )
         }
     }
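
A compilable sketch of the reworked progress line (field names follow the diff; only the most detailed branch is shown, and the percentage prefix is omitted):

    struct Progress { done_groups: u64, total_groups: u64, done_snapshots: u64, group_snapshots: u64 }

    impl std::fmt::Display for Progress {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            // groups are counted from 1 while one is still in flight
            let current_group = if self.done_groups < self.total_groups {
                self.done_groups + 1
            } else {
                self.done_groups
            };
            write!(
                f,
                "({}/{} groups, {}/{} snapshots in group #{})",
                self.done_groups, self.total_groups,
                self.done_snapshots, self.group_snapshots, current_group,
            )
        }
    }
    // prints e.g. "(1/3 groups, 2/5 snapshots in group #2)"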
@@ -1,8 +1,8 @@
-use std::collections::HashSet;
-use std::sync::{Arc, Mutex};
-use std::sync::atomic::{Ordering, AtomicUsize};
-use std::time::Instant;
 use nix::dir::Dir;
+use std::collections::HashSet;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Instant;

 use anyhow::{bail, format_err, Error};

@@ -25,8 +25,8 @@ use crate::{
     server::UPID,
     task::TaskState,
     task_log,
-    tools::ParallelHandler,
     tools::fs::lock_dir_noblock_shared,
+    tools::ParallelHandler,
 };

 /// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
@@ -34,8 +34,8 @@ use crate::{
 pub struct VerifyWorker {
     worker: Arc<dyn TaskState + Send + Sync>,
     datastore: Arc<DataStore>,
-    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
-    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 }

 impl VerifyWorker {
@@ -45,15 +45,18 @@ impl VerifyWorker {
             worker,
             datastore,
             // start with 16k chunks == up to 64G data
-            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16*1024))),
+            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
             // start with 64 chunks since we assume there are few corrupt ones
             corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
         }
     }
 }

-fn verify_blob(datastore: Arc<DataStore>, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob(
+    datastore: Arc<DataStore>,
+    backup_dir: &BackupDir,
+    info: &FileInfo,
+) -> Result<(), Error> {
     let blob = datastore.load_blob(backup_dir, &info.filename)?;

     let raw_size = blob.raw_size();
@@ -88,7 +91,11 @@ fn rename_corrupted_chunk(
     let mut new_path = path.clone();
     loop {
         new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
-        if new_path.exists() && counter < 9 { counter += 1; } else { break; }
+        if new_path.exists() && counter < 9 {
+            counter += 1;
+        } else {
+            break;
+        }
     }

     match std::fs::rename(&path, &new_path) {
@@ -109,7 +116,6 @@ fn verify_index_chunks(
     index: Box<dyn IndexFile + Send>,
     crypt_mode: CryptMode,
 ) -> Result<(), Error> {
-
     let errors = Arc::new(AtomicUsize::new(0));

     let start_time = Instant::now();
@@ -124,8 +130,9 @@ fn verify_index_chunks(
     let errors2 = Arc::clone(&errors);

     let decoder_pool = ParallelHandler::new(
-        "verify chunk decoder", 4,
-        move |(chunk, digest, size): (DataBlob, [u8;32], u64)| {
+        "verify chunk decoder",
+        4,
+        move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
             let chunk_crypt_mode = match chunk.crypt_mode() {
                 Err(err) => {
                     corrupt_chunks2.lock().unwrap().insert(digest);
@@ -159,23 +166,65 @@ fn verify_index_chunks(
         }
     );

-    for pos in 0..index.index_count() {
+    let skip_chunk = |digest: &[u8; 32]| -> bool {
+        if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
+            true
+        } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
+            let digest_str = proxmox::tools::digest_to_hex(digest);
+            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
+            errors.fetch_add(1, Ordering::SeqCst);
+            true
+        } else {
+            false
+        }
+    };
+
+    let index_count = index.index_count();
+    let mut chunk_list = Vec::with_capacity(index_count);
+
+    use std::os::unix::fs::MetadataExt;
+
+    for pos in 0..index_count {
+        if pos & 1023 == 0 {
+            verify_worker.worker.check_abort()?;
+            crate::tools::fail_on_shutdown()?;
+        }
+
+        let info = index.chunk_info(pos).unwrap();
+
+        if skip_chunk(&info.digest) {
+            continue; // already verified or marked corrupt
+        }
+
+        match verify_worker.datastore.stat_chunk(&info.digest) {
+            Err(err) => {
+                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
+                task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
+                errors.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(
+                    verify_worker.datastore.clone(),
+                    &info.digest,
+                    &verify_worker.worker,
+                );
+            }
+            Ok(metadata) => {
+                chunk_list.push((pos, metadata.ino()));
+            }
+        }
+    }
+
+    // sorting by inode improves data locality, which makes it lots faster on spinners
+    chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
+
+    for (pos, _) in chunk_list {
         verify_worker.worker.check_abort()?;
         crate::tools::fail_on_shutdown()?;

         let info = index.chunk_info(pos).unwrap();
-        let size = info.size();

-        if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
-            continue; // already verified
-        }
-
-        if verify_worker.corrupt_chunks.lock().unwrap().contains(&info.digest) {
-            let digest_str = proxmox::tools::digest_to_hex(&info.digest);
-            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
-            errors.fetch_add(1, Ordering::SeqCst);
-            continue;
+        // we must always recheck this here, the parallel worker below alters it!
+        if skip_chunk(&info.digest) {
+            continue; // already verified or marked corrupt
         }

         match verify_worker.datastore.load_chunk(&info.digest) {
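
Sorting the pending chunks by inode turns a random-access pattern into a mostly sequential one on rotating disks. The collect-then-sort idea in isolation (unix-only, like the codebase):

    use std::os::unix::fs::MetadataExt;
    use std::path::PathBuf;

    fn reading_order(paths: &[PathBuf]) -> std::io::Result<Vec<(usize, u64)>> {
        let mut list = Vec::with_capacity(paths.len());
        for (pos, path) in paths.iter().enumerate() {
            list.push((pos, std::fs::metadata(path)?.ino()));
        }
        // order by on-disk location (inode), not logical position
        list.sort_unstable_by_key(|&(_, ino)| ino);
        Ok(list)
    }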
@@ -183,10 +232,14 @@ fn verify_index_chunks(
                 verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
                 task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
                 errors.fetch_add(1, Ordering::SeqCst);
-                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
-                continue;
+                rename_corrupted_chunk(
+                    verify_worker.datastore.clone(),
+                    &info.digest,
+                    &verify_worker.worker,
+                );
             }
             Ok(chunk) => {
+                let size = info.size();
                 read_bytes += chunk.raw_size();
                 decoder_pool.send((chunk, info.digest, size))?;
                 decoded_bytes += size;
@@ -198,11 +251,11 @@ fn verify_index_chunks(

     let elapsed = start_time.elapsed().as_secs_f64();

-    let read_bytes_mib = (read_bytes as f64)/(1024.0*1024.0);
-    let decoded_bytes_mib = (decoded_bytes as f64)/(1024.0*1024.0);
+    let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
+    let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);

-    let read_speed = read_bytes_mib/elapsed;
-    let decode_speed = decoded_bytes_mib/elapsed;
+    let read_speed = read_bytes_mib / elapsed;
+    let decode_speed = decoded_bytes_mib / elapsed;

     let error_count = errors.load(Ordering::SeqCst);

@@ -229,7 +282,6 @@ fn verify_fixed_index(
     backup_dir: &BackupDir,
     info: &FileInfo,
 ) -> Result<(), Error> {
-
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);

@@ -244,11 +296,7 @@ fn verify_fixed_index(
         bail!("wrong index checksum");
     }

-    verify_index_chunks(
-        verify_worker,
-        Box::new(index),
-        info.chunk_crypt_mode(),
-    )
+    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
 }

 fn verify_dynamic_index(
@@ -256,7 +304,6 @@ fn verify_dynamic_index(
     backup_dir: &BackupDir,
     info: &FileInfo,
 ) -> Result<(), Error> {
-
     let mut path = backup_dir.relative_path();
     path.push(&info.filename);

@@ -271,11 +318,7 @@ fn verify_dynamic_index(
         bail!("wrong index checksum");
     }

-    verify_index_chunks(
-        verify_worker,
-        Box::new(index),
-        info.chunk_crypt_mode(),
-    )
+    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
 }

 /// Verify a single backup snapshot
@@ -296,15 +339,12 @@ pub fn verify_backup_dir(
     let snap_lock = lock_dir_noblock_shared(
         &verify_worker.datastore.snapshot_path(&backup_dir),
         "snapshot",
-        "locked by another operation");
+        "locked by another operation",
+    );
     match snap_lock {
-        Ok(snap_lock) => verify_backup_dir_with_lock(
-            verify_worker,
-            backup_dir,
-            upid,
-            filter,
-            snap_lock
-        ),
+        Ok(snap_lock) => {
+            verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
+        }
         Err(err) => {
             task_log!(
                 verify_worker.worker,
@@ -361,19 +401,11 @@ pub fn verify_backup_dir_with_lock(
         let result = proxmox::try_block!({
             task_log!(verify_worker.worker, " check {}", info.filename);
             match archive_type(&info.filename)? {
-                ArchiveType::FixedIndex =>
-                    verify_fixed_index(
-                        verify_worker,
-                        &backup_dir,
-                        info,
-                    ),
-                ArchiveType::DynamicIndex =>
-                    verify_dynamic_index(
-                        verify_worker,
-                        &backup_dir,
-                        info,
-                    ),
-                ArchiveType::Blob => verify_blob(verify_worker.datastore.clone(), &backup_dir, info),
+                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
+                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
+                ArchiveType::Blob => {
+                    verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
+                }
             }
         });

@@ -392,7 +424,6 @@ pub fn verify_backup_dir_with_lock(
             error_count += 1;
             verify_result = VerifyState::Failed;
         }
-
     }

     let verify_state = SnapshotVerifyState {
@@ -400,9 +431,12 @@ pub fn verify_backup_dir_with_lock(
         upid,
     };
     let verify_state = serde_json::to_value(verify_state)?;
-    verify_worker.datastore.update_manifest(&backup_dir, |manifest| {
+    verify_worker
+        .datastore
+        .update_manifest(&backup_dir, |manifest| {
             manifest.unprotected["verify_state"] = verify_state;
-    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+        })
+        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

     Ok(error_count == 0)
 }
@@ -421,7 +455,6 @@ pub fn verify_backup_group(
     upid: &UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
-
     let mut errors = Vec::new();
     let mut list = match group.list_backups(&verify_worker.datastore.base_path()) {
         Ok(list) => list,
@@ -438,26 +471,23 @@ pub fn verify_backup_group(
     };

     let snapshot_count = list.len();
-    task_log!(verify_worker.worker, "verify group {}:{} ({} snapshots)", verify_worker.datastore.name(), group, snapshot_count);
+    task_log!(
+        verify_worker.worker,
+        "verify group {}:{} ({} snapshots)",
+        verify_worker.datastore.name(),
+        group,
+        snapshot_count
+    );

     progress.group_snapshots = snapshot_count as u64;

     BackupInfo::sort_list(&mut list, false); // newest first
     for (pos, info) in list.into_iter().enumerate() {
-        if !verify_backup_dir(
-            verify_worker,
-            &info.backup_dir,
-            upid.clone(),
-            filter,
-        )? {
+        if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
             errors.push(info.backup_dir.to_string());
         }
         progress.done_snapshots = pos as u64 + 1;
-        task_log!(
-            verify_worker.worker,
-            "percentage done: {}",
-            progress
-        );
+        task_log!(verify_worker.worker, "percentage done: {}", progress);
     }

     Ok(errors)
@@ -521,11 +551,7 @@ pub fn verify_all_backups(
             .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
-            task_log!(
-                worker,
-                "unable to list backups: {}",
-                err,
-            );
+            task_log!(worker, "unable to list backups: {}", err,);
             return Ok(errors);
         }
     };
@@ -542,13 +568,8 @@ pub fn verify_all_backups(
         progress.done_snapshots = 0;
         progress.group_snapshots = 0;

-        let mut group_errors = verify_backup_group(
-            verify_worker,
-            &group,
-            &mut progress,
-            upid,
-            filter,
-        )?;
+        let mut group_errors =
+            verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
         errors.append(&mut group_errors);
     }
