Compare commits
568 Commits
Cargo.toml (34 lines changed)

@@ -1,6 +1,6 @@
 [package]
 name = "proxmox-backup"
-version = "1.0.4"
+version = "1.0.7"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -23,22 +23,22 @@ name = "proxmox_backup"
 path = "src/lib.rs"
 
 [dependencies]
-apt-pkg-native = "0.3.1" # custom patched version
+apt-pkg-native = "0.3.2"
 base64 = "0.12"
 bitflags = "1.2.1"
-bytes = "0.5"
+bytes = "1.0"
 crc32fast = "1"
 endian_trait = { version = "0.6", features = ["arrays"] }
 anyhow = "1.0"
 futures = "0.3"
-h2 = { version = "0.2", features = ["stream"] }
+h2 = { version = "0.3", features = [ "stream" ] }
 handlebars = "3.0"
 http = "0.2"
-hyper = "0.13.6"
+hyper = { version = "0.14", features = [ "full" ] }
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4"
-nix = "0.19"
+nix = "0.19.1"
 num-traits = "0.2"
 once_cell = "1.3.1"
 openssl = "0.10"
@@ -46,32 +46,34 @@ pam = "0.7"
 pam-sys = "0.5"
 percent-encoding = "2.1"
 pin-utils = "0.1.0"
-pin-project = "0.4"
+pin-project = "1.0"
 pathpatterns = "0.1.2"
-proxmox = { version = "0.7.2", features = [ "sortable-macro", "api-macro", "websocket" ] }
+proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
 #proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
 #proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
-proxmox-fuse = "0.1.0"
-pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
-#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
+proxmox-fuse = "0.1.1"
+pxar = { version = "0.8.0", features = [ "tokio-io" ] }
+#pxar = { path = "../pxar", features = [ "tokio-io" ] }
 regex = "1.2"
-rustyline = "6"
+rustyline = "7"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 siphasher = "0.3"
 syslog = "4.0"
-tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
-tokio-openssl = "0.4.0"
-tokio-util = { version = "0.3", features = [ "codec" ] }
+tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
+tokio-openssl = "0.6.1"
+tokio-stream = "0.1.0"
+tokio-util = { version = "0.6", features = [ "codec" ] }
 tower-service = "0.3.0"
 udev = ">= 0.3, <0.5"
 url = "2.1"
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 walkdir = "2"
+webauthn-rs = "0.2.5"
 xdg = "2.2"
 zstd = { version = "0.4", features = [ "bindgen" ] }
 nom = "5.1"
-crossbeam-channel = "0.4"
+crossbeam-channel = "0.5"
 
 [features]
 default = []
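The widest-reaching dependency bump above is tokio 0.2 → 1.0: several 0.2 feature flags were renamed or merged in 1.0 ("rt-threaded" became "rt-multi-thread"; "tcp", "dns" and "uds" were folded into "net"), and the stream utilities moved into the separate tokio-stream crate. A minimal Rust sketch of code built against the 1.0 feature set selected above, for illustration only (it is not taken from the proxmox-backup sources):

    use std::time::Duration;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // 1.0: Builder::new_multi_thread() replaces 0.2's threaded-scheduler builder
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all() // enable the I/O and time drivers
            .build()?;

        rt.block_on(async {
            // the "net" feature now covers what 0.2 split into "tcp", "dns" and "uds"
            let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
            println!("listening on {}", listener.local_addr()?);
            // tokio::time::sleep() replaces 0.2's tokio::time::delay_for()
            tokio::time::sleep(Duration::from_millis(10)).await;
            Ok::<(), std::io::Error>(())
        })?;
        Ok(())
    }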
Makefile (7 lines changed)

@@ -9,7 +9,8 @@ SUBDIRS := etc www docs
 # Binaries usable by users
 USR_BIN := \
 	proxmox-backup-client \
-	pxar
+	pxar \
+	pmtx
 
 # Binaries usable by admins
 USR_SBIN := \
@@ -20,7 +21,7 @@ SERVICE_BIN := \
 	proxmox-backup-api \
 	proxmox-backup-banner \
 	proxmox-backup-proxy \
-	proxmox-daily-update \
+	proxmox-daily-update
 
 ifeq ($(BUILD_MODE), release)
 CARGO_BUILD_ARGS += --release
@@ -141,6 +142,8 @@ install: $(COMPILED_BINS)
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
 	    install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
 	install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
+	# install sg-tape-cmd as setuid binary
+	install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
 	$(foreach i,$(SERVICE_BIN), \
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
 	$(MAKE) -C www install
README.rst (80 lines changed)

@@ -53,3 +53,83 @@ Setup:
 Note: 2. may be skipped if you already added the PVE or PBS package repository
 
 You are now able to build using the Makefile or cargo itself.
+
+
+Design Notes
+============
+
+Here are some random thoughts about the software design (unless I find a better place).
+
+
+Large chunk sizes
+-----------------
+
+It is important to notice that large chunk sizes are crucial for
+performance. We have a multi-user system, where different people can do
+different operations on a datastore at the same time, and most operations
+involve reading a series of chunks.
+
+So what is the maximal theoretical speed we can get when reading a
+series of chunks? Reading a chunk sequence needs the following steps:
+
+- seek to the first chunk start location
+- read the chunk data
+- seek to the next chunk start location
+- read the chunk data
+- ...
+
+Let's use the following disk performance metrics:
+
+:AST: Average Seek Time (seconds)
+:MRS: Maximum sequential Read Speed (bytes/second)
+:ACS: Average Chunk Size (bytes)
+
+The maximum performance you can get is::
+
+  MAX(ACS) = ACS / (AST + ACS/MRS)
+
+Please note that chunk data is likely to be sequentially arranged on disk,
+so this is sort of a best-case assumption.
+
+For a typical rotational disk, we assume the following values::
+
+  AST: 10ms
+  MRS: 170MB/s
+
+  MAX(4MB)  = 115.37 MB/s
+  MAX(1MB)  =  61.85 MB/s
+  MAX(64KB) =   6.02 MB/s
+  MAX(4KB)  =   0.39 MB/s
+  MAX(1KB)  =   0.10 MB/s
+
+Modern SSDs are much faster; let's assume the following::
+
+  max IOPS: 20000 => AST = 0.00005
+  MRS: 500MB/s
+
+  MAX(4MB)  = 474 MB/s
+  MAX(1MB)  = 465 MB/s
+  MAX(64KB) = 354 MB/s
+  MAX(4KB)  =  67 MB/s
+  MAX(1KB)  =  18 MB/s
+
+
+Also, the average chunk size directly relates to the number of chunks produced
+by a backup::
+
+  CHUNK_COUNT = BACKUP_SIZE / ACS
+
+Here are some statistics from my developer workstation::
+
+  Disk Usage:       65 GB
+  Directories:   58971
+  Files:        726314
+  Files < 64KB: 617541
+
+As you see, there are really many small files. If we did file-level
+deduplication, i.e. generated one chunk per file, we would end up with
+more than 700000 chunks.
+
+Instead, our current algorithm only produces large chunks, with an
+average chunk size of 4MB. With the above data, this produces about 15000
+chunks (a factor of 50 fewer chunks).
70
debian/changelog
vendored
70
debian/changelog
vendored
@ -1,3 +1,73 @@
|
|||||||
|
rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix #3197: skip fingerprint check when restoring key
|
||||||
|
|
||||||
|
* client: add 'import-with-master-key' command
|
||||||
|
|
||||||
|
* fix #3192: correct sort in prune sim
|
||||||
|
|
||||||
|
* tools/daemon: improve reload behaviour
|
||||||
|
|
||||||
|
* http client: add timeouts for critical connects
|
||||||
|
|
||||||
|
* api: improve error messages for restricted endpoints
|
||||||
|
|
||||||
|
* api: allow tokens to list users
|
||||||
|
|
||||||
|
* ui: running tasks: Use gettext for column labels
|
||||||
|
|
||||||
|
* login: add two-factor authenication (TFA) and integrate in web-interface
|
||||||
|
|
||||||
|
* login: support webAuthn, recovery keys and TOTP as TFA methods
|
||||||
|
|
||||||
|
* make it possible to abort tasks with CTRL-C
|
||||||
|
|
||||||
|
* fix #3245: only use default schedule for new jobs
|
||||||
|
|
||||||
|
* manager CLI: user/token list: fix rendering 0 (never) expire date
|
||||||
|
|
||||||
|
* update the event-driven, non-blocking I/O tokio platform to 1.0
|
||||||
|
|
||||||
|
* access: limit editing all pam credentials to superuser
|
||||||
|
|
||||||
|
* access: restrict password changes on @pam realm to superuser
|
||||||
|
|
||||||
|
* patch out wrongly linked libraries from ELFs to avoid extra, bogus
|
||||||
|
dependencies in resulting package
|
||||||
|
|
||||||
|
* add "password hint" to encryption key config
|
||||||
|
|
||||||
|
* improve GC error handling
|
||||||
|
|
||||||
|
* cli: make it possible to abort tasks with CTRL-C
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Feb 2021 10:34:23 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.6-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* stricter handling of file-descriptors, fixes some cases where some could
|
||||||
|
leak
|
||||||
|
|
||||||
|
* ui: fix various usages of the findRecord emthod, ensuring it matches exact
|
||||||
|
|
||||||
|
* garbage collection: improve task log format
|
||||||
|
|
||||||
|
* verification: improve progress log, make it similar to what's logged on
|
||||||
|
pull (sync)
|
||||||
|
|
||||||
|
* datastore: move manifest locking to /run. This avoids issues with
|
||||||
|
filesystems which cannot natively handle removing in-use files ("delete on
|
||||||
|
last close"), and create a virtual, internal, replacement file to work
|
||||||
|
around that. This is done, for example, by NFS or CIFS (samba).
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Dec 2020 12:51:33 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.5-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* client: restore: print meta information exclusively to standard error
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 15:29:58 +0100
|
||||||
|
|
||||||
rust-proxmox-backup (1.0.4-1) unstable; urgency=medium
|
rust-proxmox-backup (1.0.4-1) unstable; urgency=medium
|
||||||
|
|
||||||
* fingerprint: add bytes() accessor
|
* fingerprint: add bytes() accessor
|
||||||
|
debian/control (vendored, 87 lines changed)

@@ -7,24 +7,25 @@ Build-Depends: debhelper (>= 11),
 rustc:native,
 libstd-rust-dev,
 librust-anyhow-1+default-dev,
-librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
+librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
 librust-base64-0.12+default-dev,
 librust-bitflags-1+default-dev (>= 1.2.1-~~),
-librust-bytes-0.5+default-dev,
+librust-bytes-1+default-dev,
 librust-crc32fast-1+default-dev,
-librust-crossbeam-channel-0.4+default-dev,
+librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
 librust-futures-0.3+default-dev,
-librust-h2-0.2+default-dev,
-librust-h2-0.2+stream-dev,
+librust-h2-0.3+default-dev,
+librust-h2-0.3+stream-dev,
 librust-handlebars-3+default-dev,
 librust-http-0.2+default-dev,
-librust-hyper-0.13+default-dev (>= 0.13.6-~~),
+librust-hyper-0.14+default-dev,
+librust-hyper-0.14+full-dev,
 librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev,
-librust-nix-0.19+default-dev,
+librust-nix-0.19+default-dev (>= 0.19.1-~~),
 librust-nom-5+default-dev (>= 5.1-~~),
 librust-num-traits-0.2+default-dev,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
@@ -33,43 +34,42 @@ Build-Depends: debhelper (>= 11),
 librust-pam-sys-0.5+default-dev,
 librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
-librust-pin-project-0.4+default-dev,
+librust-pin-project-1+default-dev,
 librust-pin-utils-0.1+default-dev,
-librust-proxmox-0.7+api-macro-dev (>= 0.7.1-~~),
-librust-proxmox-0.7+default-dev (>= 0.7.1-~~),
-librust-proxmox-0.7+sortable-macro-dev (>= 0.7.1-~~),
-librust-proxmox-0.7+websocket-dev (>= 0.7.1-~~),
-librust-proxmox-fuse-0.1+default-dev,
-librust-pxar-0.6+default-dev (>= 0.6.1-~~),
-librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
-librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
+librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
+librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
+librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
+librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
+librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
+librust-pxar-0.8+default-dev,
+librust-pxar-0.8+tokio-io-dev,
 librust-regex-1+default-dev (>= 1.2-~~),
-librust-rustyline-6+default-dev,
+librust-rustyline-7+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
 librust-siphasher-0.3+default-dev,
 librust-syslog-4+default-dev,
-librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
-librust-tokio-0.2+default-dev (>= 0.2.9-~~),
-librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
-librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
-librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
-librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
-librust-tokio-0.2+process-dev (>= 0.2.9-~~),
-librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
-librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
-librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
-librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
-librust-tokio-0.2+time-dev (>= 0.2.9-~~),
-librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
-librust-tokio-openssl-0.4+default-dev,
-librust-tokio-util-0.3+codec-dev,
-librust-tokio-util-0.3+default-dev,
+librust-tokio-1+default-dev,
+librust-tokio-1+fs-dev,
+librust-tokio-1+io-util-dev,
+librust-tokio-1+macros-dev,
+librust-tokio-1+net-dev,
+librust-tokio-1+parking-lot-dev,
+librust-tokio-1+process-dev,
+librust-tokio-1+rt-dev,
+librust-tokio-1+rt-multi-thread-dev,
+librust-tokio-1+signal-dev,
+librust-tokio-1+time-dev,
+librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
+librust-tokio-stream-0.1+default-dev,
+librust-tokio-util-0.6+codec-dev,
+librust-tokio-util-0.6+default-dev,
 librust-tower-service-0.3+default-dev,
 librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
+librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
 librust-xdg-2+default-dev (>= 2.2-~~),
 librust-zstd-0.4+bindgen-dev,
 librust-zstd-0.4+default-dev,
@@ -77,34 +77,40 @@ Build-Depends: debhelper (>= 11),
 libfuse3-dev,
 libsystemd-dev,
 uuid-dev,
-debhelper (>= 12~),
+libsgutils2-dev,
 bash-completion,
-pve-eslint (>= 7.12.1-1),
-python3-docutils,
-python3-pygments,
-rsync,
+debhelper (>= 12~),
 fonts-dejavu-core <!nodoc>,
 fonts-lato <!nodoc>,
 fonts-open-sans <!nodoc>,
 graphviz <!nodoc>,
 latexmk <!nodoc>,
+patchelf,
+pve-eslint (>= 7.18.0-1),
+python3-docutils,
+python3-pygments,
 python3-sphinx <!nodoc>,
+rsync,
 texlive-fonts-extra <!nodoc>,
 texlive-fonts-recommended <!nodoc>,
 texlive-xetex <!nodoc>,
 xindy <!nodoc>
 Maintainer: Proxmox Support Team <support@proxmox.com>
 Standards-Version: 4.4.1
-Vcs-Git:
-Vcs-Browser:
+Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
+Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
 Homepage: https://www.proxmox.com
 
 Package: proxmox-backup-server
 Architecture: any
 Depends: fonts-font-awesome,
 libjs-extjs (>= 6.0.1),
+libjs-qrcodejs (>= 1.20201119),
+libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
+mt-st,
+mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
@@ -112,6 +118,7 @@ Depends: fonts-font-awesome,
 proxmox-mini-journalreader,
 proxmox-widget-toolkit (>= 2.3-6),
 pve-xtermjs (>= 4.7.0-1),
+sg3-utils,
 smartmontools,
 ${misc:Depends},
 ${shlibs:Depends},
debian/control.in (vendored, 5 lines changed)

@@ -2,8 +2,12 @@ Package: proxmox-backup-server
 Architecture: any
 Depends: fonts-font-awesome,
 libjs-extjs (>= 6.0.1),
+libjs-qrcodejs (>= 1.20201119),
+libsgutils2-2,
 libzstd1 (>= 1.3.8),
 lvm2,
+mt-st,
+mtx,
 openssh-server,
 pbs-i18n,
 postfix | mail-transport-agent,
@@ -11,6 +15,7 @@ Depends: fonts-font-awesome,
 proxmox-mini-journalreader,
 proxmox-widget-toolkit (>= 2.3-6),
 pve-xtermjs (>= 4.7.0-1),
+sg3-utils,
 smartmontools,
 ${misc:Depends},
 ${shlibs:Depends},
debian/debcargo.toml (vendored, 22 lines changed)

@@ -2,33 +2,32 @@ overlay = "."
 crate_src_path = ".."
 whitelist = ["tests/*.c"]
 
-# needed for pinutils alpha
-allow_prerelease_deps = true
+maintainer = "Proxmox Support Team <support@proxmox.com>"
 
 [source]
-# TODO: update once public
-vcs_git = ""
-vcs_browser = ""
-maintainer = "Proxmox Support Team <support@proxmox.com>"
+vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
+vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
 section = "admin"
 build_depends = [
-  "debhelper (>= 12~)",
   "bash-completion",
-  "pve-eslint (>= 7.12.1-1)",
-  "python3-docutils",
-  "python3-pygments",
-  "rsync",
+  "debhelper (>= 12~)",
   "fonts-dejavu-core <!nodoc>",
   "fonts-lato <!nodoc>",
   "fonts-open-sans <!nodoc>",
   "graphviz <!nodoc>",
   "latexmk <!nodoc>",
+  "patchelf",
+  "pve-eslint (>= 7.18.0-1)",
+  "python3-docutils",
+  "python3-pygments",
   "python3-sphinx <!nodoc>",
+  "rsync",
   "texlive-fonts-extra <!nodoc>",
   "texlive-fonts-recommended <!nodoc>",
   "texlive-xetex <!nodoc>",
   "xindy <!nodoc>",
 ]
 
 build_depends_excludes = [
   "debhelper (>=11)",
 ]
@@ -39,4 +38,5 @@ depends = [
   "libfuse3-dev",
   "libsystemd-dev",
   "uuid-dev",
+  "libsgutils2-dev",
 ]
debian/pmtx.bc (vendored, new file, 3 lines)

# pmtx bash completion

complete -C 'pmtx bashcomplete' pmtx
debian/postinst (vendored, 3 lines changed)

@@ -6,6 +6,9 @@ set -e
 
 case "$1" in
   configure)
+    # need to have user backup in the tape group
+    usermod -a -G tape backup
+
     # modeled after dh_systemd_start output
     systemctl --system daemon-reload >/dev/null || true
     if [ -n "$2" ]; then
debian/proxmox-backup-docs.links (vendored, 1 line changed)

@@ -1,2 +1,3 @@
 /usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
 /usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
+/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
debian/proxmox-backup-server.bash-completion (vendored, 1 line changed)

@@ -1 +1,2 @@
 debian/proxmox-backup-manager.bc proxmox-backup-manager
+debian/pmtx.bc pmtx
debian/proxmox-backup-server.install (vendored, 7 lines changed)

@@ -8,12 +8,15 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
 usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
+usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
 usr/sbin/proxmox-backup-manager
+usr/bin/pmtx
 usr/share/javascript/proxmox-backup/index.hbs
 usr/share/javascript/proxmox-backup/css/ext6-pbs.css
-usr/share/javascript/proxmox-backup/images/logo-128.png
-usr/share/javascript/proxmox-backup/images/proxmox_logo.png
+usr/share/javascript/proxmox-backup/images
 usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
 usr/share/man/man1/proxmox-backup-manager.1
 usr/share/man/man1/proxmox-backup-proxy.1
+usr/share/man/man1/pmtx.1
 usr/share/zsh/vendor-completions/_proxmox-backup-manager
+usr/share/zsh/vendor-completions/_pmtx
debian/rules (vendored, 10 lines changed)

@@ -42,10 +42,20 @@ override_dh_installsystemd:
 	# note: we start/try-reload-restart services manually in postinst
 	dh_installsystemd --no-start --no-restart-after-upgrade
 
+override_dh_fixperms:
+	dh_fixperms --exclude sg-tape-cmd
+
 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
 override_dh_dwz:
 	dh_dwz --no-dwz-multifile
 
+override_dh_strip:
+	dh_strip
+	for exe in $$(find debian/proxmox-backup-client/usr \
+	    debian/proxmox-backup-server/usr -executable -type f); do \
+		debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
+	done
+
 override_dh_compress:
 	dh_compress -X.pdf
debian/scripts/elf-strip-unused-dependencies.sh (vendored, new executable file, 20 lines)

#!/bin/bash

binary=$1

exec 3< <(ldd -u "$binary" | grep -oP '[^/:]+$')

patchargs=""
dropped=""
while read -r dep; do
    dropped="$dep $dropped"
    patchargs="--remove-needed $dep $patchargs"
done <&3
exec 3<&-

if [[ $dropped == "" ]]; then
    exit 0
fi

echo -e "patchelf '$binary' - removing unused dependencies:\n  $dropped"
patchelf $patchargs $binary
@@ -5,11 +5,13 @@ GENERATED_SYNOPSIS := \
 	proxmox-backup-client/catalog-shell-synopsis.rst \
 	proxmox-backup-manager/synopsis.rst \
 	pxar/synopsis.rst \
+	pmtx/synopsis.rst \
 	backup-protocol-api.rst \
 	reader-protocol-api.rst
 
 MANUAL_PAGES := \
 	pxar.1 \
+	pmtx.1 \
 	proxmox-backup-proxy.1 \
 	proxmox-backup-client.1 \
 	proxmox-backup-manager.1
@@ -20,6 +22,19 @@ PRUNE_SIMULATOR_FILES := \
 	prune-simulator/clear-trigger.png \
 	prune-simulator/prune-simulator.js
 
+LTO_BARCODE_FILES := \
+	lto-barcode/index.html \
+	lto-barcode/code39.js \
+	lto-barcode/prefix-field.js \
+	lto-barcode/label-style.js \
+	lto-barcode/tape-type.js \
+	lto-barcode/paper-size.js \
+	lto-barcode/page-layout.js \
+	lto-barcode/page-calibration.js \
+	lto-barcode/label-list.js \
+	lto-barcode/label-setup.js \
+	lto-barcode/lto-barcode.js
+
 # Sphinx documentation setup
 SPHINXOPTS =
 SPHINXBUILD = sphinx-build
@@ -54,6 +69,14 @@ pxar/synopsis.rst: ${COMPILEDIR}/pxar
 pxar.1: pxar/man1.rst pxar/description.rst pxar/synopsis.rst
 	rst2man $< >$@
 
+
+pmtx/synopsis.rst: ${COMPILEDIR}/pmtx
+	${COMPILEDIR}/pmtx printdoc > pmtx/synopsis.rst
+
+pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
+	rst2man $< >$@
+
+
 proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
 	${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
 
@@ -79,11 +102,13 @@ onlinehelpinfo:
 	@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
 
 .PHONY: html
-html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES}
+html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
 	install -dm 0755 $(BUILDDIR)/html/prune-simulator
 	install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
+	install -dm 0755 $(BUILDDIR)/html/lto-barcode
+	install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
 	@echo
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
 
@@ -353,8 +353,10 @@ To set up a master key:
 
 .. code-block:: console
 
-   # openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
-   Enter pass phrase for ./master-private.pem: *********
+   # proxmox-backup-client key import-with-master-key /path/to/target --master-keyfile /path/to/master-private.pem --encrypted-keyfile /path/to/rsa-encrypted.key
+   Master Key Password: ******
+   New Password: ******
+   Verify Password: ******
 
 7. The target file will now contain the encryption key information in plain
    text. The success of this can be confirmed by passing the resulting ``json``
@@ -172,6 +172,7 @@ html_theme_options = {
     'Proxmox Homepage': 'https://proxmox.com',
     'PDF': 'proxmox-backup.pdf',
     'Prune Simulator' : 'prune-simulator/index.html',
+    'LTO Barcode Generator' : 'lto-barcode/index.html',
 },
 
 'sidebar_width': '320px',
@@ -53,9 +53,12 @@ checksums. This manifest file is used to verify the integrity of each backup.
 When backing up to remote servers, do I have to trust the remote server?
 ------------------------------------------------------------------------
 
-Proxmox Backup Server supports client-side encryption, meaning your data is
-encrypted before it reaches the server. Thus, in the event that an attacker
-gains access to the server, they will not be able to read the data.
+Proxmox Backup Server transfers data via `Transport Layer Security (TLS)
+<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_ and additionally
+supports client-side encryption. This means that data is transferred securely
+and can be encrypted before it reaches the server. Thus, in the event that an
+attacker gains access to the server or any point of the network, they will not
+be able to read the data.
 
 .. note:: Encryption is not enabled by default. To set up encryption, see the
    `Encryption
New binary files (not shown):
  docs/images/screenshots/pbs-gui-tfa-add-recovery-keys.png (36 KiB)
  docs/images/screenshots/pbs-gui-tfa-add-totp.png (31 KiB)
  docs/images/screenshots/pbs-gui-tfa-login.png (16 KiB)
@@ -14,11 +14,12 @@ It supports deduplication, compression, and authenticated
 encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
 performance, low resource usage, and a safe, high-quality codebase.
 
-Proxmox Backup uses state of the art cryptography for client communication and
-backup content :ref:`encryption <encryption>`. Encryption is done on the
-client side, making it safer to back up data to targets that are not fully
-trusted.
+Proxmox Backup uses state of the art cryptography for both client-server
+communication and backup content :ref:`encryption <encryption>`. All
+client-server communication uses `TLS
+<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
+be encrypted on the client-side before sending, making it safer to back up data
+to targets that are not fully trusted.
 
 Architecture
 ------------
@@ -65,8 +66,9 @@ Main Features
    several gigabytes of data per second.
 
 :Encryption: Backups can be encrypted on the client-side, using AES-256 in
-   Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
-   provides very high performance on modern hardware.
+   Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
+   provides very high performance on modern hardware. In addition to client-side
+   encryption, all data is transferred via a secure TLS connection.
 
 :Web interface: Manage the Proxmox Backup Server with the integrated, web-based
    user interface.
|
351
docs/lto-barcode/code39.js
Normal file
351
docs/lto-barcode/code39.js
Normal file
@ -0,0 +1,351 @@
|
|||||||
|
// Code39 barcode generator
|
||||||
|
// see https://en.wikipedia.org/wiki/Code_39
|
||||||
|
|
||||||
|
// IBM LTO Ultrium Cartridge Label Specification
|
||||||
|
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
||||||
|
|
||||||
|
let code39_codes = {
|
||||||
|
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
||||||
|
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
||||||
|
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
||||||
|
"U": ['B', 'S', 'b', 's', 'b', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"2": ['b', 's', 'B', 'S', 'b', 's', 'b', 's', 'B'],
|
||||||
|
"B": ['b', 's', 'B', 's', 'b', 'S', 'b', 's', 'B'],
|
||||||
|
"L": ['b', 's', 'B', 's', 'b', 's', 'b', 'S', 'B'],
|
||||||
|
"V": ['b', 'S', 'B', 's', 'b', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"3": ['B', 's', 'B', 'S', 'b', 's', 'b', 's', 'b'],
|
||||||
|
"C": ['B', 's', 'B', 's', 'b', 'S', 'b', 's', 'b'],
|
||||||
|
"M": ['B', 's', 'B', 's', 'b', 's', 'b', 'S', 'b'],
|
||||||
|
"W": ['B', 'S', 'B', 's', 'b', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"4": ['b', 's', 'b', 'S', 'B', 's', 'b', 's', 'B'],
|
||||||
|
"D": ['b', 's', 'b', 's', 'B', 'S', 'b', 's', 'B'],
|
||||||
|
"N": ['b', 's', 'b', 's', 'B', 's', 'b', 'S', 'B'],
|
||||||
|
"X": ['b', 'S', 'b', 's', 'B', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"5": ['B', 's', 'b', 'S', 'B', 's', 'b', 's', 'b'],
|
||||||
|
"E": ['B', 's', 'b', 's', 'B', 'S', 'b', 's', 'b'],
|
||||||
|
"O": ['B', 's', 'b', 's', 'B', 's', 'b', 'S', 'b'],
|
||||||
|
"Y": ['B', 'S', 'b', 's', 'B', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"6": ['b', 's', 'B', 'S', 'B', 's', 'b', 's', 'b'],
|
||||||
|
"F": ['b', 's', 'B', 's', 'B', 'S', 'b', 's', 'b'],
|
||||||
|
"P": ['b', 's', 'B', 's', 'B', 's', 'b', 'S', 'b'],
|
||||||
|
"Z": ['b', 'S', 'B', 's', 'B', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"7": ['b', 's', 'b', 'S', 'b', 's', 'B', 's', 'B'],
|
||||||
|
"G": ['b', 's', 'b', 's', 'b', 'S', 'B', 's', 'B'],
|
||||||
|
"Q": ['b', 's', 'b', 's', 'b', 's', 'B', 'S', 'B'],
|
||||||
|
"-": ['b', 'S', 'b', 's', 'b', 's', 'B', 's', 'B'],
|
||||||
|
|
||||||
|
"8": ['B', 's', 'b', 'S', 'b', 's', 'B', 's', 'b'],
|
||||||
|
"H": ['B', 's', 'b', 's', 'b', 'S', 'B', 's', 'b'],
|
||||||
|
"R": ['B', 's', 'b', 's', 'b', 's', 'B', 'S', 'b'],
|
||||||
|
".": ['B', 'S', 'b', 's', 'b', 's', 'B', 's', 'b'],
|
||||||
|
|
||||||
|
"9": ['b', 's', 'B', 'S', 'b', 's', 'B', 's', 'b'],
|
||||||
|
"I": ['b', 's', 'B', 's', 'b', 'S', 'B', 's', 'b'],
|
||||||
|
"S": ['b', 's', 'B', 's', 'b', 's', 'B', 'S', 'b'],
|
||||||
|
" ": ['b', 'S', 'B', 's', 'b', 's', 'B', 's', 'b'],
|
||||||
|
|
||||||
|
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
||||||
|
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
||||||
|
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
||||||
|
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
|
||||||
|
};
|
||||||
|
|
||||||
|
let colors = [
|
||||||
|
'#BB282E',
|
||||||
|
'#FAE54A',
|
||||||
|
'#9AC653',
|
||||||
|
'#01A5E2',
|
||||||
|
'#9EAAB6',
|
||||||
|
'#D97E35',
|
||||||
|
'#E27B99',
|
||||||
|
'#67A945',
|
||||||
|
'#F6B855',
|
||||||
|
'#705A81'
|
||||||
|
];
|
||||||
|
|
||||||
|
let lto_label_width = 70;
|
||||||
|
let lto_label_height = 17;
|
||||||
|
|
||||||
|
function foreach_label(page_layout, callback) {
|
||||||
|
|
||||||
|
let count = 0;
|
||||||
|
let row = 0;
|
||||||
|
let height = page_layout.margin_top;
|
||||||
|
|
||||||
|
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
||||||
|
|
||||||
|
let column = 0;
|
||||||
|
let width = page_layout.margin_left;
|
||||||
|
|
||||||
|
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
||||||
|
|
||||||
|
callback(column, row, count, width, height);
|
||||||
|
count += 1;
|
||||||
|
|
||||||
|
column += 1;
|
||||||
|
width += page_layout.label_width;
|
||||||
|
width += page_layout.column_spacing;
|
||||||
|
}
|
||||||
|
|
||||||
|
row += 1;
|
||||||
|
height += page_layout.label_height;
|
||||||
|
height += page_layout.row_spacing;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function compute_max_labels(page_layout) {
|
||||||
|
|
||||||
|
let max_labels = 0;
|
||||||
|
foreach_label(page_layout, function() { max_labels += 1; });
|
||||||
|
return max_labels;
|
||||||
|
}
|
||||||
|
|
||||||
|
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||||
|
let svg = "";
|
||||||
|
|
||||||
|
if (label.length != 6) {
|
||||||
|
throw "wrong label length";
|
||||||
|
}
|
||||||
|
if (label_type.length != 2) {
|
||||||
|
throw "wrong label_type length";
|
||||||
|
}
|
||||||
|
|
||||||
|
let ratio = 2.75;
|
||||||
|
let parts = 3*ratio + 6; // 3*wide + 6*small;
|
||||||
|
let barcode_width = (lto_label_width/12)*10; // 10*code + 2margin
|
||||||
|
let small = barcode_width/(parts*10 + 9);
|
||||||
|
let code_width = small*parts;
|
||||||
|
let wide = small*ratio;
|
||||||
|
let xpos = pagex + code_width;
|
||||||
|
let height = 12;
|
||||||
|
|
||||||
|
if (mode === 'placeholder') {
|
||||||
|
if (label_borders) {
|
||||||
|
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
|
}
|
||||||
|
return svg;
|
||||||
|
}
|
||||||
|
if (label_borders) {
|
||||||
|
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode === "color" || mode == "frame") {
|
||||||
|
let w = lto_label_width/8;
|
||||||
|
let h = lto_label_height - height;
|
||||||
|
for (var i = 0; i < 7; i++) {
|
||||||
|
let textx = w/2 + pagex + i*w;
|
||||||
|
let texty = pagey;
|
||||||
|
|
||||||
|
let fill = "none";
|
||||||
|
if (mode === "color" && (i < 6)) {
|
||||||
|
let letter = label.charAt(i);
|
||||||
|
if (letter >= '0' && letter <= '9') {
|
||||||
|
fill = colors[parseInt(letter, 10)];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
svg += `<rect x='${textx}' y='${texty}' width='${w}' height='${h}' style='stroke:black;stroke-width:0.2;fill:${fill};'/>`;
|
||||||
|
|
||||||
|
if (i == 6) {
|
||||||
|
textx += 3;
|
||||||
|
texty += 3.7;
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:3px;font-family:sans-serif;'>${label_type}</text>`;
|
||||||
|
} else {
|
||||||
|
let letter = label.charAt(i);
|
||||||
|
textx += 3.5;
|
||||||
|
texty += 4;
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let raw_label = `*${label}${label_type}*`;
|
||||||
|
|
||||||
|
for (var i = 0; i < raw_label.length; i++) {
|
||||||
|
let letter = raw_label.charAt(i);
|
||||||
|
|
||||||
|
let code = code39_codes[letter];
|
||||||
|
if (code === undefined) {
|
||||||
|
throw `unable to encode letter '${letter}' with code39`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode === "simple") {
|
||||||
|
let textx = xpos + code_width/2;
|
||||||
|
let texty = pagey + 4;
|
||||||
|
|
||||||
|
if (i > 0 && (i+1) < raw_label.length) {
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let c of code) {
|
||||||
|
|
||||||
|
if (c === 's') {
|
||||||
|
xpos += small;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (c === 'S') {
|
||||||
|
xpos += wide;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let w = c === 'B' ? wide : small;
|
||||||
|
let ypos = pagey + lto_label_height - height;
|
||||||
|
|
||||||
|
svg += `<rect x='${xpos}' y='${ypos}' width='${w}' height='${height}' style='fill:black'/>`;
|
||||||
|
xpos = xpos + w;
|
||||||
|
}
|
||||||
|
xpos += small;
|
||||||
|
}
|
||||||
|
|
||||||
|
return svg;
|
||||||
|
}
|
||||||
|
|
||||||
|
function html_page_header() {
    let html = "<html>";

    html += "<style>";

    /* no page margins */
    html += "@page{margin-left: 0px;margin-right: 0px;margin-top: 0px;margin-bottom: 0px;}";
    /* to hide things on the printed page */
    html += "@media print { .unprintable { visibility: hidden; } }";

    html += "</style>";

    //html += "<body onload='window.print()'>";
    html += "<body style='background-color: white;'>";

    return html;
}

function svg_page_header(page_width, page_height) {
    let svg = "<svg version='1.1' xmlns='http://www.w3.org/2000/svg'";
    // with the viewBox spanning the page in mm, one SVG user unit equals 1mm
    svg += ` width='${page_width}mm' height='${page_height}mm' viewBox='0 0 ${page_width} ${page_height}'>`;

    return svg;
}

function printBarcodePage() {
    let frame = document.getElementById("print_frame");

    // print from the iframe's own window, not the outer one
    let fwindow = frame.contentWindow;
    fwindow.print();
}

function generate_barcode_page(target_id, page_layout, label_list, calibration) {

    let svg = svg_page_header(page_layout.page_width, page_layout.page_height);

    let c = calibration;

    console.log(calibration);

    svg += "<g id='barcode_page'";
    if (c !== undefined) {
        svg += ` transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'`;
    }
    svg += '>';

    foreach_label(page_layout, function(column, row, count, xpos, ypos) {

        if (count >= label_list.length) { return; }

        let item = label_list[count];

        svg += svg_label(item.mode, item.label, item.tape_type, xpos, ypos, page_layout.label_borders);
    });

    svg += "</g>";
    svg += "</svg>";

    let html = html_page_header();
    html += svg;
    html += "</body>";
    html += "</html>";

    let frame = document.getElementById(target_id);

    setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);

    let fwindow = frame.contentWindow;

    fwindow.document.open();
    fwindow.document.write(html);
    fwindow.document.close();
}

function setupPrintFrame(frame, page_width, page_height) {
    let dpi = 98;

    let dpr = window.devicePixelRatio;
    if (dpr !== undefined) {
        dpi = dpi*dpr;
    }

    let ppmm = dpi/25.4; // pixels per millimeter

    frame.width = page_width*ppmm;
    frame.height = page_height*ppmm;
}

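For orientation, setupPrintFrame converts the millimeter page size into CSS pixels, assuming an effective screen resolution of 98dpi (scaled by devicePixelRatio on HiDPI screens). A quick check of the numbers for an A4 page; the 98dpi value mirrors the constant above, the rest is plain arithmetic:

// mm -> px conversion as used above; 98dpi is the assumed base resolution.
let dpi = 98;
let ppmm = dpi/25.4; // ≈ 3.86 pixels per millimeter

// A4 is 210mm x 297mm, so the print frame ends up at roughly 810 x 1146 pixels:
console.log(Math.round(210*ppmm), Math.round(297*ppmm));
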
function generate_calibration_page(target_id, page_layout, calibration) {

    let frame = document.getElementById(target_id);

    setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);

    let svg = svg_page_header(page_layout.page_width, page_layout.page_height);

    svg += "<defs>";
    svg += "<marker id='endarrow' markerWidth='10' markerHeight='7' ";
    svg += "refX='10' refY='3.5' orient='auto'><polygon points='0 0, 10 3.5, 0 7' />";
    svg += "</marker>";

    svg += "<marker id='startarrow' markerWidth='10' markerHeight='7' ";
    svg += "refX='0' refY='3.5' orient='auto'><polygon points='10 0, 10 7, 0 3.5' />";
    svg += "</marker>";
    svg += "</defs>";

    svg += "<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(0,0,0)'/>";

    let text_style = "style='font-weight:bold;font-size:4;font-family:sans-serif;'";

    svg += `<text x='10' y='99' ${text_style}>Sx = 50mm</text>`;
    svg += "<line x1='0' y1='100' x2='50' y2='100' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";

    svg += `<text x='60' y='99' ${text_style}>Dx = 100mm</text>`;
    svg += "<line x1='50' y1='100' x2='150' y2='100' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";

    svg += `<text x='142' y='10' ${text_style} writing-mode='tb'>Sy = 50mm</text>`;
    svg += "<line x1='140' y1='0' x2='140' y2='50' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";

    svg += `<text x='142' y='60' ${text_style} writing-mode='tb'>Dy = 100mm</text>`;
    svg += "<line x1='140' y1='50' x2='140' y2='150' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";

    let c = calibration;
    if (c !== undefined) {
        svg += `<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(255,0,0)' `;
        svg += `transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'/>`;
    }

    svg += "</svg>";

    let html = html_page_header();
    html += svg;
    html += "</body>";
    html += "</html>";

    let fwindow = frame.contentWindow;

    fwindow.document.open();
    fwindow.document.write(html);
    fwindow.document.close();
}
docs/lto-barcode/index.html (new file, 51 lines)
@@ -0,0 +1,51 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
<title>Proxmox LTO Barcode Label Generator</title>
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
<style>
/* fix action column icons */
.x-action-col-icon {
    font-size: 13px;
    height: 13px;
}
.x-grid-cell-inner-action-col {
    padding: 6px 10px 5px;
}
.x-action-col-icon:before {
    color: #555;
}
.x-action-col-icon {
    color: #21BF4B;
}
.x-action-col-icon {
    margin: 0 1px;
    font-size: 14px;
}
.x-action-col-icon:before, .x-action-col-icon:after {
    font-size: 14px;
}
.x-action-col-icon:hover:before, .x-action-col-icon:hover:after {
    text-shadow: 1px 1px 1px #AAA;
    font-weight: 800;
}
</style>
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
<script type="text/javascript" src="extjs/ext-all.js"></script>

<script type="text/javascript" src="code39.js"></script>
<script type="text/javascript" src="prefix-field.js"></script>
<script type="text/javascript" src="label-style.js"></script>
<script type="text/javascript" src="tape-type.js"></script>
<script type="text/javascript" src="paper-size.js"></script>
<script type="text/javascript" src="page-layout.js"></script>
<script type="text/javascript" src="page-calibration.js"></script>
<script type="text/javascript" src="label-list.js"></script>
<script type="text/javascript" src="label-setup.js"></script>
<script type="text/javascript" src="lto-barcode.js"></script>
</head>
<body>
</body>
</html>
docs/lto-barcode/label-list.js (new file, 140 lines)
@@ -0,0 +1,140 @@
Ext.define('LabelList', {
    extend: 'Ext.grid.Panel',
    alias: 'widget.labelList',

    plugins: {
        ptype: 'cellediting',
        clicksToEdit: 1
    },

    selModel: 'cellmodel',

    store: {
        fields: [
            'prefix',
            'tape_type',
            {
                type: 'integer',
                name: 'start',
            },
            {
                type: 'integer',
                name: 'end',
            },
        ],
        data: [],
    },

    listeners: {
        validateedit: function(editor, context) {
            console.log(context.field);
            console.log(context.value);
            context.record.set(context.field, context.value);
            context.record.commit();
            return true;
        },
    },

    columns: [
        {
            text: 'Prefix',
            dataIndex: 'prefix',
            flex: 1,
            editor: {
                xtype: 'prefixfield',
                allowBlank: false,
            },
            renderer: function(value, metaData, record) {
                console.log(record);
                if (record.data.mode === 'placeholder') {
                    return "-";
                }
                return value;
            },
        },
        {
            text: 'Type',
            dataIndex: 'tape_type',
            flex: 1,
            editor: {
                xtype: 'ltoTapeType',
                allowBlank: false,
            },
            renderer: function(value, metaData, record) {
                console.log(record);
                if (record.data.mode === 'placeholder') {
                    return "-";
                }
                return value;
            },
        },
        {
            text: 'Mode',
            dataIndex: 'mode',
            flex: 1,
            editor: {
                xtype: 'ltoLabelStyle',
                allowBlank: false,
            },
        },
        {
            text: 'Start',
            dataIndex: 'start',
            flex: 1,
            editor: {
                xtype: 'numberfield',
                allowBlank: false,
            },
        },
        {
            text: 'End',
            dataIndex: 'end',
            flex: 1,
            editor: {
                xtype: 'numberfield',
            },
            renderer: function(value) {
                if (value === null || value === '' || value === undefined) {
                    return "Fill";
                }
                return value;
            },
        },
        {
            xtype: 'actioncolumn',
            width: 75,
            items: [
                {
                    tooltip: 'Move Up',
                    iconCls: 'fa fa-arrow-up',
                    handler: function(grid, rowIndex) {
                        if (rowIndex < 1) { return; }
                        let store = grid.getStore();
                        let record = store.getAt(rowIndex);
                        store.removeAt(rowIndex);
                        store.insert(rowIndex - 1, record);
                    },
                },
                {
                    tooltip: 'Move Down',
                    iconCls: 'fa fa-arrow-down',
                    handler: function(grid, rowIndex) {
                        let store = grid.getStore();
                        if (rowIndex >= store.getCount()) { return; }
                        let record = store.getAt(rowIndex);
                        store.removeAt(rowIndex);
                        store.insert(rowIndex + 1, record);
                    },
                },
                {
                    tooltip: 'Delete',
                    iconCls: 'fa fa-scissors',
                    //iconCls: 'fa critical fa-trash-o',
                    handler: function(grid, rowIndex) {
                        grid.getStore().removeAt(rowIndex);
                    },
                }
            ],
        },
    ],
});
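For reference, each grid row pairs a prefix and tape type with a numeric range; hypothetical store entries (field names taken from the store definition above, values invented for illustration):

// Hypothetical rows for this store (values invented for illustration).
let example_rows = [
    { prefix: 'TEST', tape_type: 'L8', mode: 'color', start: 0, end: 9 }, // TEST00 .. TEST09
    { prefix: 'TEST', tape_type: 'L8', mode: 'simple', start: 0 },        // no 'end': shown as "Fill"
];
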
docs/lto-barcode/label-setup.js (new file, 107 lines)
@@ -0,0 +1,107 @@
Ext.define('LabelSetupPanel', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.labelSetupPanel',

    layout: {
        type: 'hbox',
        align: 'stretch',
        pack: 'start',
    },

    getValues: function() {
        let me = this;

        let values = {};

        Ext.Array.each(me.query('[isFormField]'), function(field) {
            let data = field.getSubmitData();
            Ext.Object.each(data, function(name, val) {
                let parsed = parseInt(val, 10);
                values[name] = isNaN(parsed) ? val : parsed;
            });
        });

        return values;
    },

    controller: {
        xclass: 'Ext.app.ViewController',

        init: function() {
            let me = this;
            let view = me.getView();
            let list = view.down("labelList");
            let store = list.getStore();
            store.on('datachanged', function(store) {
                view.fireEvent("listchanged", store);
            });
            store.on('update', function(store) {
                view.fireEvent("listchanged", store);
            });
        },

        onAdd: function() {
            let list = this.lookupReference('label_list');
            let view = this.getView();
            let params = view.getValues();
            list.getStore().add(params);
        },
    },

    items: [
        {
            border: false,
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            items: [
                {
                    xtype: 'prefixfield',
                    name: 'prefix',
                    value: 'TEST',
                    fieldLabel: 'Prefix',
                },
                {
                    xtype: 'ltoTapeType',
                    name: 'tape_type',
                    fieldLabel: 'Type',
                    value: 'L8',
                },
                {
                    xtype: 'ltoLabelStyle',
                    name: 'mode',
                    fieldLabel: 'Mode',
                    value: 'color',
                },
                {
                    xtype: 'numberfield',
                    name: 'start',
                    fieldLabel: 'Start',
                    minValue: 0,
                    allowBlank: false,
                    value: 0,
                },
                {
                    xtype: 'numberfield',
                    name: 'end',
                    fieldLabel: 'End',
                    minValue: 0,
                    emptyText: 'Fill',
                },
                {
                    xtype: 'button',
                    text: 'Add',
                    handler: 'onAdd',
                },
            ],
        },
        {
            margin: "0 0 0 10",
            xtype: 'labelList',
            reference: 'label_list',
            flex: 1,
        },
    ],
});
docs/lto-barcode/label-style.js (new file, 20 lines)
@@ -0,0 +1,20 @@
Ext.define('LtoLabelStyle', {
    extend: 'Ext.form.field.ComboBox',
    alias: 'widget.ltoLabelStyle',

    editable: false,

    displayField: 'text',
    valueField: 'value',
    queryMode: 'local',

    store: {
        fields: ['value', 'text'],
        data: [
            { value: 'simple', text: "Simple" },
            { value: 'color', text: 'Color (frames with color)' },
            { value: 'frame', text: 'Frame (no color)' },
            { value: 'placeholder', text: 'Placeholder (empty)' },
        ],
    },
});
docs/lto-barcode/lto-barcode.js (new file, 214 lines)
@@ -0,0 +1,214 @@
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
if (Ext.isFirefox) {
    Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
}

function draw_labels(target_id, label_list, page_layout, calibration) {

    let max_labels = compute_max_labels(page_layout);

    let count_fixed = 0;
    let count_fill = 0;

    for (let i = 0; i < label_list.length; i++) {
        let item = label_list[i];
        if (item.end === null || item.end === '' || item.end === undefined) {
            count_fill += 1;
            continue;
        }
        if (item.end <= item.start) {
            count_fixed += 1;
            continue;
        }
        count_fixed += (item.end - item.start) + 1;
    }

    let rest = max_labels - count_fixed;
    let fill_size = 1;
    if (rest >= count_fill) {
        fill_size = Math.floor(rest/count_fill);
    }

    let list = [];

    let count_fill_2 = 0;

    for (let i = 0; i < label_list.length; i++) {
        let item = label_list[i];
        let count;
        if (item.end === null || item.end === '' || item.end === undefined) {
            count_fill_2 += 1;
            if (count_fill_2 === count_fill) {
                count = rest; // the last fill entry gets all remaining slots
            } else {
                count = fill_size;
            }
            rest -= count;
        } else {
            if (item.end <= item.start) {
                count = 1;
            } else {
                count = (item.end - item.start) + 1;
            }
        }

        for (let j = 0; j < count; j++) {

            let id = item.start + j;

            if (item.prefix.length === 6) {

                list.push({
                    label: item.prefix,
                    tape_type: item.tape_type,
                    mode: item.mode,
                    id: id,
                });
                rest += count - j - 1;
                break;

            } else {

                let pad_len = 6 - item.prefix.length;
                let label = item.prefix + id.toString().padStart(pad_len, '0');

                if (label.length !== 6) {
                    rest += count - j;
                    break;
                }

                list.push({
                    label: label,
                    tape_type: item.tape_type,
                    mode: item.mode,
                    id: id,
                });
            }
        }
    }

    generate_barcode_page(target_id, page_layout, list, calibration);
}

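The fill logic above distributes the label slots left over after all fixed ranges evenly across the 'Fill' entries, with the last fill entry absorbing any remainder. A small arithmetic sketch with assumed numbers (a hypothetical page holding 30 labels):

// Assumed numbers for illustration: a page holding 30 labels,
// one fixed range of 20 labels, and two 'Fill' entries.
let max_labels = 30;
let count_fixed = 20;                        // e.g. one entry with start=0, end=19
let count_fill = 2;

let rest = max_labels - count_fixed;         // 10 slots left for the fill entries
let fill_size = Math.floor(rest/count_fill); // 5 labels per fill entry

// Each fill entry but the last gets fill_size labels; the last one gets
// whatever remains in 'rest' (here also 5, since 10 divides evenly).
console.log(rest, fill_size); // 10 5
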
Ext.define('MainView', {
    extend: 'Ext.container.Viewport',
    alias: 'widget.mainview',

    layout: {
        type: 'vbox',
        align: 'stretch',
        pack: 'start',
    },
    width: 800,

    controller: {
        xclass: 'Ext.app.ViewController',

        update_barcode_preview: function() {
            let me = this;
            let view = me.getView();
            let list_view = view.down("labelList");

            let store = list_view.getStore();
            let label_list = [];
            store.each((record) => {
                label_list.push(record.data);
            });

            let page_layout_view = view.down("pageLayoutPanel");
            let page_layout = page_layout_view.getValues();

            let calibration_view = view.down("pageCalibration");
            let page_calibration = calibration_view.getValues();

            draw_labels("print_frame", label_list, page_layout, page_calibration);
        },

        update_calibration_preview: function() {
            let me = this;
            let view = me.getView();
            let page_layout_view = view.down("pageLayoutPanel");
            let page_layout = page_layout_view.getValues();

            let calibration_view = view.down("pageCalibration");
            let page_calibration = calibration_view.getValues();
            console.log(page_calibration);
            generate_calibration_page('print_frame', page_layout, page_calibration);
        },

        control: {
            labelSetupPanel: {
                listchanged: function(store) {
                    this.update_barcode_preview();
                },
                activate: function() {
                    this.update_barcode_preview();
                },
            },
            pageLayoutPanel: {
                pagechanged: function(layout) {
                    this.update_barcode_preview();
                },
                activate: function() {
                    this.update_barcode_preview();
                },
            },
            pageCalibration: {
                calibrationchanged: function() {
                    this.update_calibration_preview();
                },
                activate: function() {
                    this.update_calibration_preview();
                },
            },
        },
    },

    items: [
        {
            xtype: 'tabpanel',
            items: [
                {
                    xtype: 'labelSetupPanel',
                    title: 'Proxmox LTO Barcode Label Generator',
                    bodyPadding: 10,
                },
                {
                    xtype: 'pageLayoutPanel',
                    title: 'Page Layout',
                    bodyPadding: 10,
                },
                {
                    xtype: 'pageCalibration',
                    title: 'Printer Calibration',
                    bodyPadding: 10,
                },
            ],
        },
        {
            xtype: 'panel',
            layout: "center",
            title: 'Print Preview',
            bodyStyle: "background-color: grey;",
            bodyPadding: 10,
            html: '<center><iframe id="print_frame" frameBorder="0"></iframe></center>',
            border: false,
            flex: 1,
            scrollable: true,
            tools: [{
                type: 'print',
                tooltip: 'Open Print Dialog',
                handler: function(event, toolEl, panelHeader) {
                    printBarcodePage();
                },
            }],
        },
    ],
});

Ext.onReady(function() {

    Ext.create('MainView', {
        renderTo: Ext.getBody(),
    });
});
docs/lto-barcode/page-calibration.js (new file, 142 lines)
@@ -0,0 +1,142 @@
Ext.define('PageCalibration', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.pageCalibration',

    layout: {
        type: 'hbox',
        align: 'stretch',
        pack: 'start',
    },

    getValues: function() {
        let me = this;

        let values = {};

        Ext.Array.each(me.query('[isFormField]'), function(field) {
            if (field.isValid()) {
                let data = field.getSubmitData();
                Ext.Object.each(data, function(name, val) {
                    let parsed = parseFloat(val);
                    values[name] = isNaN(parsed) ? val : parsed;
                });
            }
        });

        if (values.d_x === undefined) { return; }
        if (values.d_y === undefined) { return; }
        if (values.s_x === undefined) { return; }
        if (values.s_y === undefined) { return; }

        let scalex = 100/values.d_x;
        let scaley = 100/values.d_y;

        let offsetx = ((50*scalex) - values.s_x)/scalex;
        let offsety = ((50*scaley) - values.s_y)/scaley;

        return {
            scalex: scalex,
            scaley: scaley,
            offsetx: offsetx,
            offsety: offsety,
        };
    },

    controller: {
        xclass: 'Ext.app.ViewController',

        control: {
            'field': {
                change: function() {
                    let view = this.getView();
                    let param = view.getValues();
                    view.fireEvent("calibrationchanged", param);
                },
            },
        },
    },

    items: [
        {
            border: false,
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            items: [
                {
                    xtype: 'displayfield',
                    fieldLabel: 'Start Offset Sx (mm)',
                    labelWidth: 150,
                    value: 50,
                },
                {
                    xtype: 'displayfield',
                    fieldLabel: 'Length Dx (mm)',
                    labelWidth: 150,
                    value: 100,
                },
                {
                    xtype: 'displayfield',
                    fieldLabel: 'Start Offset Sy (mm)',
                    labelWidth: 150,
                    value: 50,
                },
                {
                    xtype: 'displayfield',
                    fieldLabel: 'Length Dy (mm)',
                    labelWidth: 150,
                    value: 100,
                },
            ],
        },
        {
            border: false,
            margin: '0 0 0 20',
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            items: [
                {
                    xtype: 'numberfield',
                    name: 's_x',
                    fieldLabel: 'Measured Start Offset Sx (mm)',
                    allowBlank: false,
                    labelWidth: 200,
                },
                {
                    xtype: 'numberfield',
                    name: 'd_x',
                    fieldLabel: 'Measured Length Dx (mm)',
                    allowBlank: false,
                    labelWidth: 200,
                },
                {
                    xtype: 'numberfield',
                    name: 's_y',
                    fieldLabel: 'Measured Start Offset Sy (mm)',
                    allowBlank: false,
                    labelWidth: 200,
                },
                {
                    xtype: 'numberfield',
                    name: 'd_y',
                    fieldLabel: 'Measured Length Dy (mm)',
                    allowBlank: false,
                    labelWidth: 200,
                },
            ],
        },
    ],
});
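The scale and offset computed in getValues() invert the printer's error: the calibration page draws a reference square at offset 50mm with 100mm sides, so scaling by 100/D and translating by (50*scale - S)/scale makes the next print land on target. A worked sketch with assumed measurements (values invented for illustration):

// Assumed measurements for illustration: the 100mm reference length printed
// as 99mm (d_x), and the 50mm start offset landed at 51mm (s_x).
let d_x = 99, s_x = 51;

let scalex = 100/d_x;                     // ≈ 1.0101, stretch to compensate the shrink
let offsetx = ((50*scalex) - s_x)/scalex; // ≈ -0.49mm, shift applied before scaling

console.log(scalex.toFixed(4), offsetx.toFixed(2)); // 1.0101 -0.49
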
docs/lto-barcode/page-layout.js (new file, 167 lines)
@@ -0,0 +1,167 @@
Ext.define('PageLayoutPanel', {
    extend: 'Ext.panel.Panel',
    alias: 'widget.pageLayoutPanel',

    layout: {
        type: 'hbox',
        align: 'stretch',
        pack: 'start',
    },

    getValues: function() {
        let me = this;

        let values = {};

        Ext.Array.each(me.query('[isFormField]'), function(field) {
            if (field.isValid()) {
                let data = field.getSubmitData();
                Ext.Object.each(data, function(name, val) {
                    values[name] = val;
                });
            }
        });

        let paper_size = values.paper_size || 'a4';

        let defaults = paper_sizes[paper_size];
        if (defaults === undefined) {
            throw `unknown paper size ${paper_size}`;
        }
        let param = Ext.apply({}, defaults);

        param.paper_size = paper_size;

        Ext.Object.each(values, function(name, val) {
            let parsed = parseFloat(val);
            param[name] = isNaN(parsed) ? val : parsed;
        });

        return param;
    },

    controller: {
        xclass: 'Ext.app.ViewController',

        control: {
            'paperSize': {
                change: function(field, paper_size) {
                    let view = this.getView();
                    let defaults = paper_sizes[paper_size];

                    let names = [
                        'label_width',
                        'label_height',
                        'margin_left',
                        'margin_top',
                        'column_spacing',
                        'row_spacing',
                    ];
                    for (let i = 0; i < names.length; i++) {
                        let name = names[i];
                        let f = view.down(`field[name=${name}]`);
                        let v = defaults[name];
                        if (v !== undefined) {
                            f.setValue(v);
                            f.setDisabled(defaults.fixed);
                        } else {
                            f.setDisabled(false);
                        }
                    }
                },
            },
            'field': {
                change: function() {
                    let view = this.getView();
                    let param = view.getValues();
                    view.fireEvent("pagechanged", param);
                },
            },
        },
    },

    items: [
        {
            border: false,
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            items: [
                {
                    xtype: 'paperSize',
                    name: 'paper_size',
                    value: 'a4',
                    fieldLabel: 'Paper Size',
                },
                {
                    xtype: 'numberfield',
                    name: 'label_width',
                    fieldLabel: 'Label width',
                    minValue: 70,
                    allowBlank: false,
                    value: 70,
                },
                {
                    xtype: 'numberfield',
                    name: 'label_height',
                    fieldLabel: 'Label height',
                    minValue: 17,
                    allowBlank: false,
                    value: 17,
                },
                {
                    xtype: 'checkbox',
                    name: 'label_borders',
                    fieldLabel: 'Label borders',
                    value: true,
                    inputValue: true,
                },
            ],
        },
        {
            border: false,
            margin: '0 0 0 10',
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            items: [
                {
                    xtype: 'numberfield',
                    name: 'margin_left',
                    fieldLabel: 'Left margin',
                    minValue: 0,
                    allowBlank: false,
                    value: 0,
                },
                {
                    xtype: 'numberfield',
                    name: 'margin_top',
                    fieldLabel: 'Top margin',
                    minValue: 0,
                    allowBlank: false,
                    value: 4,
                },
                {
                    xtype: 'numberfield',
                    name: 'column_spacing',
                    fieldLabel: 'Column spacing',
                    minValue: 0,
                    allowBlank: false,
                    value: 0,
                },
                {
                    xtype: 'numberfield',
                    name: 'row_spacing',
                    fieldLabel: 'Row spacing',
                    minValue: 0,
                    allowBlank: false,
                    value: 0,
                },
            ],
        },
    ],

});
docs/lto-barcode/paper-size.js (new file, 49 lines)
@@ -0,0 +1,49 @@
let paper_sizes = {
    a4: {
        comment: 'A4 (plain)',
        page_width: 210,
        page_height: 297,
    },
    letter: {
        comment: 'Letter (plain)',
        page_width: 215.9,
        page_height: 279.4,
    },
    avery3420: {
        fixed: true,
        comment: 'Avery Zweckform 3420',
        page_width: 210,
        page_height: 297,
        label_width: 70,
        label_height: 17,
        margin_left: 0,
        margin_top: 4,
        column_spacing: 0,
        row_spacing: 0,
    },
};

function paper_size_combo_data() {
    let data = [];

    for (let [key, value] of Object.entries(paper_sizes)) {
        data.push({ value: key, text: value.comment });
    }
    return data;
}

Ext.define('PaperSize', {
    extend: 'Ext.form.field.ComboBox',
    alias: 'widget.paperSize',

    editable: false,

    displayField: 'text',
    valueField: 'value',
    queryMode: 'local',

    store: {
        fields: ['value', 'text'],
        data: paper_size_combo_data(),
    },
});
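Supporting another label sheet only takes a new entry in paper_sizes; setting fixed: true makes the page-layout tab lock the layout fields to the given values. A hypothetical entry (key and all dimensions invented for illustration):

// Hypothetical entry, for illustration only; the key and all dimensions
// are invented. If declared before Ext.define('PaperSize') runs,
// paper_size_combo_data() will include it in the combo box.
paper_sizes.example_sheet = {
    fixed: true,
    comment: 'Example 3-column sheet',
    page_width: 210,
    page_height: 297,
    label_width: 70,
    label_height: 30,
    margin_left: 0,
    margin_top: 10,
    column_spacing: 0,
    row_spacing: 2,
};
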
docs/lto-barcode/prefix-field.js (new file, 15 lines)
@@ -0,0 +1,15 @@
Ext.define('PrefixField', {
    extend: 'Ext.form.field.Text',
    alias: 'widget.prefixfield',

    maxLength: 6,
    allowBlank: false,

    maskRe: /([A-Za-z]+)$/,

    listeners: {
        change: function(field) {
            field.setValue(field.getValue().toUpperCase());
        },
    },
});
docs/lto-barcode/tape-type.js (new file, 23 lines)
@@ -0,0 +1,23 @@
Ext.define('LtoTapeType', {
    extend: 'Ext.form.field.ComboBox',
    alias: 'widget.ltoTapeType',

    editable: false,

    displayField: 'text',
    valueField: 'value',
    queryMode: 'local',

    store: {
        fields: ['value', 'text'],
        data: [
            { value: 'L8', text: "LTO-8" },
            { value: 'L7', text: "LTO-7" },
            { value: 'L6', text: "LTO-6" },
            { value: 'L5', text: "LTO-5" },
            { value: 'L4', text: "LTO-4" },
            { value: 'L3', text: "LTO-3" },
            { value: 'CU', text: "Cleaning Unit" },
        ],
    },
});
docs/pmtx/description.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
Description
^^^^^^^^^^^

The ``pmtx`` command controls SCSI media changer devices (tape
autoloaders).
docs/pmtx/man1.rst (new file, 28 lines)
@@ -0,0 +1,28 @@
==========================
pmtx
==========================

.. include:: ../epilog.rst

-------------------------------------------------------------
Control SCSI media changer devices (tape autoloaders)
-------------------------------------------------------------

:Author: |AUTHOR|
:Version: Version |VERSION|
:Manual section: 1


Synopsis
==========

.. include:: synopsis.rst

Description
============

.. include:: description.rst


.. include:: ../pbs-copyright.rst
@@ -5,7 +5,7 @@ proxmox-backup-client
 .. include:: ../epilog.rst
 
 -------------------------------------------------------------
-Command line toot for Backup and Restore
+Command line tool for Backup and Restore
 -------------------------------------------------------------
 
 :Author: |AUTHOR|
@@ -449,11 +449,8 @@ Ext.onReady(function() {
         });
     });
 
-    // ordering here and iterating backwards through days
-    // ensures that everything is ordered
-    timesOnSingleDay.sort(function(a, b) {
-        return a < b;
-    });
+    // sort recent times first, backups array below is ordered now -> past
+    timesOnSingleDay.sort((a, b) => b - a);
 
     let backups = [];
 
@@ -485,16 +482,17 @@ Ext.onReady(function() {
     backups.forEach(function(backup) {
         let mark = backup.mark;
-        if (mark && mark === 'keep') {
-            let id = idFunc(backup);
-            alreadyIncluded[id] = true;
-        }
-    });
-
-    backups.forEach(function(backup) {
-        let mark = backup.mark;
         let id = idFunc(backup);
 
-        if (finished || alreadyIncluded[id] || mark) {
+        if (finished || alreadyIncluded[id]) {
             return;
         }
+
+        if (mark) {
+            if (mark === 'keep') {
+                alreadyIncluded[id] = true;
+            }
+            return;
+        }
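The comparator change in the first hunk is more than style: a sort comparator must return a negative/zero/positive number, while `(a, b) => a < b` returns a boolean that sort() coerces to 0 or 1, so "a comes first" is never signaled. A minimal illustration:

// A comparator must return a number; booleans coerce to 0/1 and never
// produce a negative result, so the order depends on the engine's sort.
let times = [3, 1, 2];
console.log([...times].sort((a, b) => a < b)); // unreliable: comparator never returns < 0
console.log([...times].sort((a, b) => b - a)); // [3, 2, 1], newest-first as intended
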
docs/tape-backup.rst (new file, 732 lines)
@@ -0,0 +1,732 @@
Tape Backup
===========

Proxmox tape backup provides an easy way to store datastore content
onto magnetic tapes. This increases data safety because you get:

- an additional copy of the data
- on a different media type (tape)
- at an additional location (you can move tapes offsite)

In most restore jobs, only data from the last backup job is restored.
Restore requests decline further the older the data
gets. Considering this, tape backup may also help to reduce disk
usage, because you can safely remove data from disk once it is archived on
tape. This is especially true if you need to keep data for several
years.

Tape backups do not provide random access to the stored data. Instead,
you need to restore the data to disk before you can access it
again. Also, if you store your tapes offsite (using some kind of tape
vaulting service), you need to bring them onsite before you can do any
restore. So please consider that restores from tape can take much
longer than restores from disk.


Tape Technology Primer
----------------------

.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open

As of 2021, the only broadly available tape technology standard is
`Linear Tape Open`_, and different vendors offer LTO Ultrium tape
drives, autoloaders and LTO tape cartridges.

There are a few vendors offering proprietary drives with
slight advantages in performance and capacity, but they have
significant disadvantages:

- proprietary (single vendor)
- a much higher purchase cost

So we currently do not test such drives.

In general, LTO tapes offer the following advantages:

- Durable (30 years)
- High Capacity (12 TB)
- Relatively low cost per TB
- Cold Media
- Movable (storable inside a vault)
- Multiple vendors (for both media and drives)
- Built-in AES-GCM encryption engine

Please note that `Proxmox Backup Server` already stores compressed
data, so we do not need/use the tape compression feature.


Supported Hardware
------------------

Proxmox Backup Server supports `Linear Tape Open`_ generation 4 (LTO4)
or later. In general, all SCSI2 tape drives supported by the Linux
kernel should work, but features like hardware encryption need LTO4
or later.

Tape changer support is done using the Linux 'mtx' command line
tool, so any changer device supported by that tool should work.


Drive Performance
~~~~~~~~~~~~~~~~~

Current LTO-8 tapes provide read/write speeds of up to 360MB/s. This means
that it still takes a minimum of 9 hours to completely write or
read a single tape (even at maximum speed).

The only way to speed up that data rate is to use more than one
drive. That way, you can run several backup jobs in parallel, or run
restore jobs while the other drives are used for backups.

Also consider that you first need to read the data from your datastore
(disk). However, a single spinning disk is unable to deliver data at this
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
so it takes about 33 hours to read the 12TB needed to fill an LTO-8 tape. If you want
to run your tape at full speed, please make sure that the source
datastore is able to deliver that performance (e.g., by using SSDs).

Terminology
-----------

:Tape Labels: are used to uniquely identify a tape. You normally use
   some sticky paper labels and apply them to the front of the
   cartridge. We additionally store the label text magnetically on the
   tape (first file on tape).

.. _Code 39: https://en.wikipedia.org/wiki/Code_39

.. _LTO Ultrium Cartridge Label Specification: https://www.ibm.com/support/pages/ibm-lto-ultrium-cartridge-label-specification

.. _LTO Barcode Generator: lto-barcode/index.html

:Barcodes: are a special form of tape labels, which are electronically
   readable. Most LTO tape robots use an 8 character string encoded as
   `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
   Specification`_.

   You can either buy such barcode labels from your cartridge vendor,
   or print them yourself. You can use our `LTO Barcode Generator`_ app
   for that.

.. Note:: Physical labels and the associated adhesive shall have an
   environmental performance to match or exceed the environmental
   specifications of the cartridge to which it is applied.

:Media Pools: A media pool is a logical container for tapes. A backup
   job targets one media pool, so a job only uses tapes from that
   pool. The pool additionally defines how long a backup job can
   append data to tapes (allocation policy) and how long you want to
   keep the data (retention policy).

:Media Set: A group of continuously written tapes (all from the same
   media pool).

:Tape drive: The device used to read and write data to the tape. There
   are standalone drives, but drives often ship within tape libraries.

:Tape changer: A device which can change the tapes inside a tape drive
   (tape robot). They are usually part of a tape library.

.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library

:`Tape library`_: A storage device that contains one or more tape drives,
   a number of slots to hold tape cartridges, a barcode reader to
   identify tape cartridges, and an automated method for loading tapes
   (a robot).

   People also call this an 'autoloader', 'tape robot' or 'tape jukebox'.

:Inventory: The inventory stores the list of known tapes (with
   additional status information).

:Catalog: A media catalog stores information about the media content.

Tape Quickstart
---------------

1. Configure your tape hardware (drives and changers)

2. Configure one or more media pools

3. Label your tape cartridges.

4. Start your first tape backup job ...


Configuration
-------------

Please note that you can configure anything using the graphical user
interface or the command line interface. Both methods result in the
same configuration.

Tape changers
~~~~~~~~~~~~~

Tape changers (robots) are part of a `Tape Library`_. You can skip
this step if you are using a standalone drive.

Linux is able to auto detect those devices, and you can get a list
of available devices using::

  # proxmox-tape changer scan
  ┌─────────────────────────────┬─────────┬──────────────┬────────┐
  │ path                        │ vendor  │ model        │ serial │
  ╞═════════════════════════════╪═════════╪══════════════╪════════╡
  │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
  └─────────────────────────────┴─────────┴──────────────┴────────┘

In order to use that device with Proxmox, you need to create a
configuration entry::

  # proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52

Where ``sl3`` is an arbitrary name you can choose.

.. Note:: Please use stable device path names from inside
   ``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
   different device after reboot, and that is not what you want.

You can show the final configuration with::

  # proxmox-tape changer config sl3
  ┌──────┬─────────────────────────────┐
  │ Name │ Value                       │
  ╞══════╪═════════════════════════════╡
  │ name │ sl3                         │
  ├──────┼─────────────────────────────┤
  │ path │ /dev/tape/by-id/scsi-CC2C52 │
  └──────┴─────────────────────────────┘

Or simply list all configured changer devices::

  # proxmox-tape changer list
  ┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
  │ name │ path                        │ vendor  │ model        │ serial     │
  ╞══════╪═════════════════════════════╪═════════╪══════════════╪════════════╡
  │ sl3  │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52     │
  └──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘

The Vendor, Model and Serial number are auto detected, but only shown
if the device is online.

To test your setup, please query the status of the changer device with::

  # proxmox-tape changer status sl3
  ┌───────────────┬──────────┬────────────┬─────────────┐
  │ entry-kind    │ entry-id │ changer-id │ loaded-slot │
  ╞═══════════════╪══════════╪════════════╪═════════════╡
  │ drive         │        0 │ vtape1     │           1 │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        1 │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        2 │ vtape2     │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ ...           │      ... │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │       16 │            │             │
  └───────────────┴──────────┴────────────┴─────────────┘

Tape libraries usually provide some special import/export slots (also
called "mail slots"). Tapes inside those slots are accessible from
outside, making it easy to add/remove tapes to/from the library. Those
tapes are considered to be "offline", so backup jobs will not use
them. Those special slots are auto-detected and marked as
``import-export`` slots in the status command.

It's worth noting that some of the smaller tape libraries don't have
such slots. While they do have something called a "Mail Slot", that slot
is just a way to grab the tape from the gripper, and they are unable
to hold media while the robot does other things. They also do not
expose that "Mail Slot" over the SCSI interface, so you won't see them in
the status output.

As a workaround, you can mark some of the normal slots as export
slots. The software treats those slots like real ``import-export``
slots, and the media inside those slots is considered to be 'offline'
(not available for backup)::

  # proxmox-tape changer update sl3 --export-slots 15,16

After that, you can see those artificial ``import-export`` slots in
the status output::

  # proxmox-tape changer status sl3
  ┌───────────────┬──────────┬────────────┬─────────────┐
  │ entry-kind    │ entry-id │ changer-id │ loaded-slot │
  ╞═══════════════╪══════════╪════════════╪═════════════╡
  │ drive         │        0 │ vtape1     │           1 │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ import-export │       15 │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ import-export │       16 │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        1 │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        2 │ vtape2     │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ ...           │      ... │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │       14 │            │             │
  └───────────────┴──────────┴────────────┴─────────────┘

Tape drives
~~~~~~~~~~~

Linux is able to auto detect tape drives, and you can get a list
of available tape drives using::

  # proxmox-tape drive scan
  ┌────────────────────────────────┬────────┬─────────────┬────────┐
  │ path                           │ vendor │ model       │ serial │
  ╞════════════════════════════════╪════════╪═════════════╪════════╡
  │ /dev/tape/by-id/scsi-12345-nst │ IBM    │ ULT3580-TD4 │  12345 │
  └────────────────────────────────┴────────┴─────────────┴────────┘

In order to use that drive with Proxmox, you need to create a
configuration entry::

  # proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst

.. Note:: Please use stable device path names from inside
   ``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
   different device after reboot, and that is not what you want.

If you have a tape library, you also need to set the associated
changer device::

  # proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0

The ``--changer-drivenum`` option is only necessary if the tape library
includes more than one drive (the changer status command lists all
drive numbers).

You can show the final configuration with::

  # proxmox-tape drive config mydrive
  ┌─────────┬────────────────────────────────┐
  │ Name    │ Value                          │
  ╞═════════╪════════════════════════════════╡
  │ name    │ mydrive                        │
  ├─────────┼────────────────────────────────┤
  │ path    │ /dev/tape/by-id/scsi-12345-nst │
  ├─────────┼────────────────────────────────┤
  │ changer │ sl3                            │
  └─────────┴────────────────────────────────┘

.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
   configuration, because it is the default.

To list all configured drives use::

  # proxmox-tape drive list
  ┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
  │ name     │ path                           │ changer │ vendor │ model       │ serial │
  ╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
  │ mydrive  │ /dev/tape/by-id/scsi-12345-nst │ sl3     │ IBM    │ ULT3580-TD4 │  12345 │
  └──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘

The Vendor, Model and Serial number are auto detected, but only shown
if the device is online.

For testing, you can simply query the drive status with::

  # proxmox-tape status --drive mydrive
  ┌───────────┬────────────────────────┐
  │ Name      │ Value                  │
  ╞═══════════╪════════════════════════╡
  │ blocksize │ 0                      │
  ├───────────┼────────────────────────┤
  │ status    │ DRIVE_OPEN | IM_REP_EN │
  └───────────┴────────────────────────┘

.. NOTE:: Blocksize should always be 0 (variable block size
   mode). This is the default anyway.

Media Pools
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
A media pool is a logical container for tapes. A backup job targets
|
||||||
|
one media pool, so a job only uses tapes from that pool.
|
||||||
|
|
||||||
|
.. topic:: Media Set
|
||||||
|
|
||||||
|
A media set is a group of continuously written tapes, used to split
|
||||||
|
the larger pool into smaller, restorable units. One or more backup
|
||||||
|
jobs write to a media set, producing an ordered group of
|
||||||
|
tapes. Media sets are identified by an unique ID. That ID and the
|
||||||
|
sequence number is stored on each tape of that set (tape label).
|
||||||
|
|
||||||
|
Media sets are the basic unit for restore tasks, i.e. you need all
|
||||||
|
tapes in the set to restore the media set content. Data is fully
|
||||||
|
deduplicated inside a media set.
|
||||||
|
|
||||||
|
|
||||||
|
.. topic:: Media Set Allocation Policy
|
||||||
|
|
||||||
|
The pool additionally defines how long backup jobs can append data
|
||||||
|
to a media set. The following settings are possible:
|
||||||
|
|
||||||
|
- Try to use the current media set.
|
||||||
|
|
||||||
|
This setting produce one large media set. While this is very
|
||||||
|
space efficient (deduplication, no unused space), it can lead to
|
||||||
|
long restore times, because restore jobs needs to read all tapes in the
|
||||||
|
set.
|
||||||
|
|
||||||
|
.. NOTE:: Data is fully deduplicated inside a media set. That
|
||||||
|
also means that data is randomly distributed over the tapes in
|
||||||
|
the set. So even if you restore a single VM, this may have to
|
||||||
|
read data from all tapes inside the media set.
|
||||||
|
|
||||||
|
Larger media sets are also more error prone, because a single
|
||||||
|
damaged media makes the restore fail.
|
||||||
|
|
||||||
|
Usage scenario: Mostly used with tape libraries, and you manually
|
||||||
|
trigger new set creation by running a backup job with the
|
||||||
|
``--export`` option.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts with the existence of a newer
|
||||||
|
media set.
|
||||||
|
|
||||||
|
- Always create a new media set.
|
||||||
|
|
||||||
|
With this setting each backup job creates a new media set. This
|
||||||
|
is less space efficient, because the last media from the last set
|
||||||
|
may not be fully written, leaving the remaining space unused.
|
||||||
|
|
||||||
|
The advantage is that this procudes media sets of minimal
|
||||||
|
size. Small set are easier to handle, you can move sets to an
|
||||||
|
off-site vault, and restore is much faster.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts with the creation time of the
|
||||||
|
media set.
|
||||||
|
|
||||||
|
- Create a new set when the specified Calendar Event triggers.
|
||||||
|
|
||||||
|
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
|
||||||
|
|
||||||
|
This allows you to specify points in time by using systemd like
|
||||||
|
Calendar Event specifications (see `systemd.time manpage`_).
|
||||||
|
|
||||||
|
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
|
||||||
|
will create a new set each week.
|
||||||
|
|
||||||
|
This balances between space efficency and media count.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts when the calendar event
|
||||||
|
triggers.
|
||||||
|
|
||||||
|
Additionally, the following events may allocate a new media set:
|
||||||
|
|
||||||
|
- Required tape is offline (and you use a tape library).
|
||||||
|
|
||||||
|
- Current set contains damaged of retired tapes.
|
||||||
|
|
||||||
|
- Media pool encryption changed
|
||||||
|
|
||||||
|
- Database consistency errors, e.g. if the inventory does not
|
||||||
|
contain required media info, or contain conflicting infos
|
||||||
|
(outdated data).
.. topic:: Retention Policy

   Defines how long we want to keep the data.

   - Always overwrite media.

   - Protect data for the duration specified.

     We use systemd-like time spans to specify durations, e.g. ``2 weeks``
     (see the `systemd.time manpage`_).

   - Never overwrite data.

.. topic:: Hardware Encryption

   LTO-4 (or later) tape drives support hardware encryption. If you
   configure the media pool to use encryption, all data written to the
   tapes is encrypted using the configured key.

   This way, unauthorized users cannot read data from the media,
   e.g. if you lose a tape while shipping it to an offsite location.

   .. Note:: If the backup client also encrypts data, the data on tape
      will be double encrypted.

   The password-protected key is stored on each tape, so it is
   possible to `restore the key <restore_encryption_key_>`_ using the
   password. Please make sure you remember the password, in case you
   need to restore the key.
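As a hedged sketch of how this could be enabled on the command line,
assuming the media pool's encryption option is called ``--encrypt``
and takes the fingerprint of a previously created key (see the
Encryption Key Management section below)::

  # proxmox-tape pool update daily --encrypt 14:f8:79:b9:...:f5:0f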
.. NOTE:: FIXME: Add note about the global content namespace. (We do not
   store the source datastore, so it is impossible to distinguish
   store1:/vm/100 from store2:/vm/100. Please use different media
   pools if the sources are from different namespaces.)


The following command creates a new media pool::

  // proxmox-tape pool create <name> --drive <string> [OPTIONS]
  # proxmox-tape pool create daily --drive mydrive


Additional options can be set later, using the update command::

  # proxmox-tape pool update daily --allocation daily --retention 7days


To list all configured pools, use::

  # proxmox-tape pool list
  ┌───────┬──────────┬────────────┬───────────┬──────────┐
  │ name  │ drive    │ allocation │ retention │ template │
  ╞═══════╪══════════╪════════════╪═══════════╪══════════╡
  │ daily │ mydrive  │ daily      │ 7days     │          │
  └───────┴──────────┴────────────┴───────────┴──────────┘


Tape Jobs
~~~~~~~~~
Administration
--------------

Many sub-commands of the ``proxmox-tape`` command line tool take a
parameter called ``--drive``, which specifies the tape drive you want
to work on. For convenience, you can set this in an environment
variable::

  # export PROXMOX_TAPE_DRIVE=mydrive

You can then omit the ``--drive`` parameter from the command. If the
drive has an associated changer device, you may also omit the changer
parameter from commands that need a changer device, for example::

  # proxmox-tape changer status

This should display the status of the changer device associated with
drive ``mydrive``.


Label Tapes
~~~~~~~~~~~

By default, tape cartridges all look the same, so you need to put a
label on them for unique identification. First, put a sticky paper
label with some human-readable text on the cartridge.

If you use a `Tape Library`_, you should use an 8 character string
encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
Specification`_. You can either buy such barcode labels from your
cartridge vendor, or print them yourself. You can use our `LTO Barcode
Generator`_ app for that.

Next, you need to write that same label text to the tape, so that the
software can uniquely identify the tape too.

For a standalone drive, manually insert the new tape cartridge into the
drive and run::

  # proxmox-tape label --changer-id <label-text> [--pool <pool-name>]

You may omit the ``--pool`` argument to allow the tape to be used by any pool.

.. Note:: For safety reasons, this command fails if the tape contains
   any data. If you want to overwrite it anyway, erase the tape first.

You can verify success by reading back the label::

  # proxmox-tape read-label
  ┌─────────────────┬──────────────────────────────────────┐
  │ Name            │ Value                                │
  ╞═════════════════╪══════════════════════════════════════╡
  │ changer-id      │ vtape1                               │
  ├─────────────────┼──────────────────────────────────────┤
  │ uuid            │ 7f42c4dd-9626-4d89-9f2b-c7bc6da7d533 │
  ├─────────────────┼──────────────────────────────────────┤
  │ ctime           │ Wed Jan  6 09:07:51 2021             │
  ├─────────────────┼──────────────────────────────────────┤
  │ pool            │ daily                                │
  ├─────────────────┼──────────────────────────────────────┤
  │ media-set-uuid  │ 00000000-0000-0000-0000-000000000000 │
  ├─────────────────┼──────────────────────────────────────┤
  │ media-set-ctime │ Wed Jan  6 09:07:51 2021             │
  └─────────────────┴──────────────────────────────────────┘

.. NOTE:: A ``media-set-uuid`` of all zeros indicates an empty
   tape (not used by any media set).

If you have a tape library, apply the sticky barcode label to the tape
cartridges first. Then load those empty tapes into the library. You
can then label all unlabeled tapes with a single command::

  # proxmox-tape barcode-label [--pool <pool-name>]
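To double-check the result, you could list the known media afterwards
(a sketch; the exact output columns may differ)::

  # proxmox-tape media list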
Run Tape Backups
~~~~~~~~~~~~~~~~

To manually run a backup job, use::

  # proxmox-tape backup <store> <pool> [OPTIONS]

The following options are available:

--eject-media  Eject media upon job completion.

  It is normally good practice to eject the tape after use. This unmounts the
  tape from the drive and prevents the tape from getting dirty with dust.

--export-media-set  Export media set upon job completion.

  After a successful backup job, this moves all tapes from the used
  media set into import-export slots. The operator can then pick up
  those tapes and move them to a media vault.
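For example, to back up the hypothetical datastore ``store1`` to the
``daily`` pool, ejecting the tape when done::

  # proxmox-tape backup store1 daily --eject-media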
Restore from Tape
~~~~~~~~~~~~~~~~~

Restore is done at media-set granularity, so you first need to find
out which media set contains the data you want to restore. This
information is stored in the media catalog. If you do not have media
catalogs, you need to restore them first. Please note that you need
the catalog to find your data, but restoring a complete media-set does
not need media catalogs.

The following command shows the media content (from the catalog)::

  # proxmox-tape media content
  ┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
  │ label-text │ pool │ media-set-name           │ seq-nr │ snapshot                       │ media-set-uuid                       │
  ╞════════════╪══════╪══════════════════════════╪════════╪════════════════════════════════╪══════════════════════════════════════╡
  │ TEST01L8   │ p2   │ Wed Jan 13 13:55:55 2021 │      0 │ vm/201/2021-01-11T10:43:48Z    │ 9da37a55-aac7-4deb-91c6-482b3b675f30 │
  ├────────────┼──────┼──────────────────────────┼────────┼────────────────────────────────┼──────────────────────────────────────┤
  │ ...        │ ...  │ ...                      │    ... │ ...                            │ ...                                  │
  └────────────┴──────┴──────────────────────────┴────────┴────────────────────────────────┴──────────────────────────────────────┘


A restore job reads the data from the media set and moves it back to
disk (datastore)::

  // proxmox-tape restore <media-set-uuid> <datastore>
  # proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore


Update Inventory
~~~~~~~~~~~~~~~~
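As a hedged sketch, assuming the ``inventory`` subcommand reads the
label of each tape in the changer and updates the local media inventory
accordingly::

  # proxmox-tape inventory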
Restore Catalog
~~~~~~~~~~~~~~~
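As a hedged sketch, assuming the ``catalog`` subcommand scans the
currently loaded tape and restores its catalog to the local database::

  # proxmox-tape catalog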
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~

Creating a new encryption key::

  # proxmox-tape key create --hint "tape pw 2020"
  Tape Encryption Key Password: **********
  Verify Password: **********
  "14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"

List existing encryption keys::

  # proxmox-tape key list
  ┌───────────────────────────────────────────────────┬───────────────┐
  │ fingerprint                                       │ hint          │
  ╞═══════════════════════════════════════════════════╪═══════════════╡
  │ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f   │ tape pw 2020  │
  └───────────────────────────────────────────────────┴───────────────┘

To show encryption key details::

  # proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
  ┌─────────────┬───────────────────────────────────────────────┐
  │ Name        │ Value                                         │
  ╞═════════════╪═══════════════════════════════════════════════╡
  │ kdf         │ scrypt                                        │
  ├─────────────┼───────────────────────────────────────────────┤
  │ created     │ Sat Jan 23 14:47:21 2021                      │
  ├─────────────┼───────────────────────────────────────────────┤
  │ modified    │ Sat Jan 23 14:47:21 2021                      │
  ├─────────────┼───────────────────────────────────────────────┤
  │ fingerprint │ 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f │
  ├─────────────┼───────────────────────────────────────────────┤
  │ hint        │ tape pw 2020                                  │
  └─────────────┴───────────────────────────────────────────────┘

The ``paperkey`` subcommand can be used to create a QR encoded
version of a tape encryption key. The following command sends the output of the
``paperkey`` command to a text file, for easy printing::

  proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt


.. _restore_encryption_key:

Restoring Encryption Keys
^^^^^^^^^^^^^^^^^^^^^^^^^

You can restore the encryption key from the tape, using the password
used to generate the key. First, load the tape you want to restore
into the drive. Then run::

  # proxmox-tape key restore
  Tape Encryption Key Password: ***********

If the password is correct, the key will be imported into the
database. Further restore jobs automatically use any available key.


Tape Cleaning
~~~~~~~~~~~~~

LTO tape drives require regular cleaning. This is done by loading a
cleaning cartridge into the drive, which is a manual task for
standalone drives.

For tape libraries, cleaning cartridges are identified using special
labels starting with the letters "CLN". For example, our tape library
has a cleaning cartridge inside slot 3::

  # proxmox-tape changer status sl3
  ┌───────────────┬──────────┬────────────┬─────────────┐
  │ entry-kind    │ entry-id │ changer-id │ loaded-slot │
  ╞═══════════════╪══════════╪════════════╪═════════════╡
  │ drive         │        0 │ vtape1     │           1 │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        1 │            │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        2 │ vtape2     │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ slot          │        3 │ CLN001CU   │             │
  ├───────────────┼──────────┼────────────┼─────────────┤
  │ ...           │      ... │            │             │
  └───────────────┴──────────┴────────────┴─────────────┘

To initiate a cleaning operation, simply run::

  # proxmox-tape clean

This command does the following:

- find the cleaning tape (in slot 3)

- unload the current media from the drive (back to slot 1)

- load the cleaning tape into the drive

- run the drive cleaning operation

- unload the cleaning tape (to slot 3)
@@ -284,3 +284,104 @@ you can use the ``proxmox-backup-manager user permission`` command:

  Path: /datastore/store1
  - Datastore.Backup (*)
.. _user_tfa:

Two-factor authentication
-------------------------

Introduction
~~~~~~~~~~~~

Simple authentication requires only one secret piece of evidence (one factor)
for a user to successfully claim an identity (authenticate), for example, that
you are allowed to log in as `root@pam` on a specific Proxmox Backup Server.
If the password gets stolen, or leaked in another way, anybody can use it to
log in - even if they should not be allowed to do so.

With two-factor authentication (TFA), a user is asked for an additional factor
to prove their authenticity. The extra factor is different from a password
(something only the user knows); it is something only the user has, for example
a piece of hardware (security key) or a secret saved on the user's smartphone.

This means that a remote attacker can never get hold of such a physical object.
So, even if they knew your password, they could not successfully authenticate
as you, as your second factor is missing.

.. image:: images/screenshots/pbs-gui-tfa-login.png
  :align: right
  :alt: Add a new user

Available Second Factors
~~~~~~~~~~~~~~~~~~~~~~~~

You can set up more than one second factor, to avoid a situation where losing
your smartphone or security key permanently locks you out of your account.

Three different two-factor authentication methods are supported:

* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
  A short code derived from a shared secret and the current time; it changes
  every 30 seconds.

* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
  A general standard for authentication. It is implemented by various security
  devices, like hardware keys or trusted platform modules (TPM) from a computer
  or smartphone.

* Single use Recovery Keys. A list of keys which should either be printed out
  and locked away in a secure place, or saved digitally in an electronic vault.
  Each key can be used only once. These are perfect for ensuring that you are
  not locked out, even if all of your other second factors are lost or corrupt.


Setup
~~~~~

.. _user_tfa_setup_totp:

TOTP
^^^^

.. image:: images/screenshots/pbs-gui-tfa-add-totp.png
  :align: right
  :alt: Add a new user

No server setup is required. Simply install a TOTP app on your
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
Proxmox Backup Server web-interface to add a TOTP factor.

.. _user_tfa_setup_webauthn:

WebAuthn
^^^^^^^^

For WebAuthn to work, you need to have two things:

* a trusted HTTPS certificate (for example, by using `Let's Encrypt
  <https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)

* a WebAuthn configuration (see *Configuration -> Authentication* in the
  Proxmox Backup Server web-interface). This can be auto-filled in most setups.

Once you have fulfilled both of these requirements, you can add a WebAuthn
configuration in the *Access Control* panel.

.. _user_tfa_setup_recovery_keys:

Recovery Keys
^^^^^^^^^^^^^

.. image:: images/screenshots/pbs-gui-tfa-add-recovery-keys.png
  :align: right
  :alt: Add a new user

Recovery key codes do not need any preparation; you can simply create a set of
recovery keys in the *Access Control* panel.

.. note:: There can only be one set of single-use recovery keys per user at any
   time.

TFA and Automated Access
~~~~~~~~~~~~~~~~~~~~~~~~

Two-factor authentication is only implemented for the web-interface. You should
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
as a storage).
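Such a token could be created on the command line like this (a sketch;
the token name ``automation`` is just an example)::

  # proxmox-backup-manager user generate-token root@pam automation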
@@ -28,7 +28,7 @@ async fn run() -> Result<(), Error> {

     let auth_id = Authid::root_auth_id();

-    let options = HttpClientOptions::new()
+    let options = HttpClientOptions::default()
         .interactive(true)
         .ticket_cache(true);
@@ -2,7 +2,7 @@ use std::future::Future;
 use std::pin::Pin;
 use std::task::{Context, Poll};

-use anyhow::{Error};
+use anyhow::Error;
 use futures::future::TryFutureExt;
 use futures::stream::Stream;
 use tokio::net::TcpStream;

@@ -38,11 +38,11 @@ impl Future for Process {
                     this.body.flow_control().release_capacity(chunk.len())?;
                     this.bytes += chunk.len();
                     // println!("GOT FRAME {}", chunk.len());
-                },
+                }
                 Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
                 None => {
                     this.trailers = true;
-                },
+                }
             }
         }
     }
@@ -52,7 +52,6 @@ impl Future for Process {
 fn send_request(
     mut client: h2::client::SendRequest<bytes::Bytes>,
 ) -> impl Future<Output = Result<usize, Error>> {
-
     println!("sending request");

     let request = http::Request::builder()
@@ -62,10 +61,10 @@ fn send_request(
     let (response, _stream) = client.send_request(request, true).unwrap();

-    response
-        .map_err(Error::from)
-        .and_then(|response| {
-            Process { body: response.into_body(), trailers: false, bytes: 0 }
-        })
+    response.map_err(Error::from).and_then(|response| Process {
+        body: response.into_body(),
+        trailers: false,
+        bytes: 0,
+    })
 }
@@ -74,16 +73,15 @@ fn main() -> Result<(), Error> {
 }

 async fn run() -> Result<(), Error> {

     let start = std::time::SystemTime::now();

-    let conn = TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008)))
-        .await?;
+    let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
+    conn.set_nodelay(true).unwrap();

     let (client, h2) = h2::client::Builder::new()
-        .initial_connection_window_size(1024*1024*1024)
-        .initial_window_size(1024*1024*1024)
-        .max_frame_size(4*1024*1024)
+        .initial_connection_window_size(1024 * 1024 * 1024)
+        .initial_window_size(1024 * 1024 * 1024)
+        .max_frame_size(4 * 1024 * 1024)
         .handshake(conn)
         .await?;
@@ -99,10 +97,13 @@ async fn run() -> Result<(), Error> {
     }

     let elapsed = start.elapsed().unwrap();
-    let elapsed = (elapsed.as_secs() as f64) +
-        (elapsed.subsec_millis() as f64)/1000.0;
+    let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;

-    println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
+    println!(
+        "Downloaded {} bytes, {} MB/s",
+        bytes,
+        (bytes as f64) / (elapsed * 1024.0 * 1024.0)
+    );

     Ok(())
 }
@@ -5,6 +5,7 @@ use std::task::{Context, Poll};
 use anyhow::{format_err, Error};
 use futures::future::TryFutureExt;
 use futures::stream::Stream;
+use tokio::net::TcpStream;

 // Simple H2 client to test H2 download speed using h2s-server.rs

@@ -37,11 +38,11 @@ impl Future for Process {
                     this.body.flow_control().release_capacity(chunk.len())?;
                     this.bytes += chunk.len();
                     // println!("GOT FRAME {}", chunk.len());
-                },
+                }
                 Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
                 None => {
                     this.trailers = true;
-                },
+                }
             }
         }
     }
@@ -60,10 +61,10 @@ fn send_request(
     let (response, _stream) = client.send_request(request, true).unwrap();

-    response
-        .map_err(Error::from)
-        .and_then(|response| {
-            Process { body: response.into_body(), trailers: false, bytes: 0 }
-        })
+    response.map_err(Error::from).and_then(|response| Process {
+        body: response.into_body(),
+        trailers: false,
+        bytes: 0,
+    })
 }
@@ -74,57 +75,51 @@ fn main() -> Result<(), Error> {
 async fn run() -> Result<(), Error> {
     let start = std::time::SystemTime::now();

-    let conn =
-        tokio::net::TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
+    let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;

     conn.set_nodelay(true).unwrap();
-    conn.set_recv_buffer_size(1024*1024).unwrap();

     use openssl::ssl::{SslConnector, SslMethod};

     let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
     ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
-    let conn =
-        tokio_openssl::connect(
-            ssl_connector_builder.build().configure()?,
-            "localhost",
-            conn,
-        )
+    let ssl = ssl_connector_builder
+        .build()
+        .configure()?
+        .into_ssl("localhost")?;
+
+    let conn = tokio_openssl::SslStream::new(ssl, conn)?;
+    let mut conn = Box::pin(conn);
+    conn.as_mut()
+        .connect()
         .await
         .map_err(|err| format_err!("connect failed - {}", err))?;

     let (client, h2) = h2::client::Builder::new()
-        .initial_connection_window_size(1024*1024*1024)
-        .initial_window_size(1024*1024*1024)
-        .max_frame_size(4*1024*1024)
+        .initial_connection_window_size(1024 * 1024 * 1024)
+        .initial_window_size(1024 * 1024 * 1024)
+        .max_frame_size(4 * 1024 * 1024)
         .handshake(conn)
         .await?;

-    // Spawn a task to run the conn...
     tokio::spawn(async move {
-        if let Err(e) = h2.await {
-            println!("GOT ERR={:?}", e);
+        if let Err(err) = h2.await {
+            println!("GOT ERR={:?}", err);
         }
     });

     let mut bytes = 0;
-    for _ in 0..100 {
-        match send_request(client.clone()).await {
-            Ok(b) => {
-                bytes += b;
-            }
-            Err(e) => {
-                println!("ERROR {}", e);
-                return Ok(());
-            }
-        }
+    for _ in 0..2000 {
+        bytes += send_request(client.clone()).await?;
     }

     let elapsed = start.elapsed().unwrap();
-    let elapsed = (elapsed.as_secs() as f64) +
-        (elapsed.subsec_millis() as f64)/1000.0;
+    let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;

-    println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
+    println!(
+        "Downloaded {} bytes, {} MB/s",
+        bytes,
+        (bytes as f64) / (elapsed * 1024.0 * 1024.0)
+    );

     Ok(())
 }
@@ -2,14 +2,12 @@ use std::sync::Arc;

 use anyhow::{format_err, Error};
 use futures::*;
-use hyper::{Request, Response, Body};
-use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
+use hyper::{Body, Request, Response};
+use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
 use tokio::net::{TcpListener, TcpStream};

 use proxmox_backup::configdir;

-// Simple H2 server to test H2 speed with h2s-client.rs
-
 fn main() -> Result<(), Error> {
     proxmox_backup::tools::runtime::main(run())
 }
@@ -19,22 +17,23 @@ async fn run() -> Result<(), Error> {
     let cert_path = configdir!("/proxy.pem");

     let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
-    acceptor.set_private_key_file(key_path, SslFiletype::PEM)
+    acceptor
+        .set_private_key_file(key_path, SslFiletype::PEM)
         .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
-    acceptor.set_certificate_chain_file(cert_path)
+    acceptor
+        .set_certificate_chain_file(cert_path)
         .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
     acceptor.check_private_key().unwrap();

     let acceptor = Arc::new(acceptor.build());

-    let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
+    let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;

     println!("listening on {:?}", listener.local_addr());

     loop {
         let (socket, _addr) = listener.accept().await?;
-        tokio::spawn(handle_connection(socket, Arc::clone(&acceptor))
-            .map(|res| {
+        tokio::spawn(handle_connection(socket, Arc::clone(&acceptor)).map(|res| {
             if let Err(err) = res {
                 eprintln!("Error: {}", err);
             }
@@ -42,15 +41,14 @@ async fn run() -> Result<(), Error> {
     }
 }

-async fn handle_connection(
-    socket: TcpStream,
-    acceptor: Arc<SslAcceptor>,
-) -> Result<(), Error> {
+async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Result<(), Error> {
     socket.set_nodelay(true).unwrap();
-    socket.set_send_buffer_size(1024*1024).unwrap();
-    socket.set_recv_buffer_size(1024*1024).unwrap();

-    let socket = tokio_openssl::accept(acceptor.as_ref(), socket).await?;
+    let ssl = openssl::ssl::Ssl::new(acceptor.context())?;
+    let stream = tokio_openssl::SslStream::new(ssl, socket)?;
+    let mut stream = Box::pin(stream);
+
+    stream.as_mut().accept().await?;

     let mut http = hyper::server::conn::Http::new();
     http.http2_only(true);
@@ -61,7 +59,7 @@ async fn handle_connection(

     let service = hyper::service::service_fn(|_req: Request<Body>| {
         println!("Got request");
-        let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A,A...]
+        let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
         let body = Body::from(buffer);

         let response = Response::builder()
@@ -72,7 +70,7 @@ async fn handle_connection(
         future::ok::<_, Error>(response)
     });

-    http.serve_connection(socket, service)
+    http.serve_connection(stream, service)
         .map_err(Error::from)
         .await?;
@@ -1,26 +1,21 @@
-use anyhow::{Error};
+use anyhow::Error;
 use futures::*;
+use hyper::{Body, Request, Response};
+use tokio::net::{TcpListener, TcpStream};

-// Simple H2 server to test H2 speed with h2client.rs
-
-use tokio::net::TcpListener;
-use tokio::io::{AsyncRead, AsyncWrite};
-
-use proxmox_backup::client::pipe_to_stream::PipeToSendStream;
-
 fn main() -> Result<(), Error> {
     proxmox_backup::tools::runtime::main(run())
 }

 async fn run() -> Result<(), Error> {
-    let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
+    let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;

     println!("listening on {:?}", listener.local_addr());

     loop {
         let (socket, _addr) = listener.accept().await?;
-        tokio::spawn(handle_connection(socket)
-            .map(|res| {
+        tokio::spawn(handle_connection(socket).map(|res| {
             if let Err(err) = res {
                 eprintln!("Error: {}", err);
             }
@@ -28,24 +23,33 @@ async fn run() -> Result<(), Error> {
     }
 }

-async fn handle_connection<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<(), Error> {
-    let mut conn = h2::server::handshake(socket).await?;
+async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
+    socket.set_nodelay(true).unwrap();

-    println!("H2 connection bound");
+    let mut http = hyper::server::conn::Http::new();
+    http.http2_only(true);
+    // increase window size: todo - find optiomal size
+    let max_window_size = (1 << 31) - 2;
+    http.http2_initial_stream_window_size(max_window_size);
+    http.http2_initial_connection_window_size(max_window_size);

-    while let Some((request, mut respond)) = conn.try_next().await? {
-        println!("GOT request: {:?}", request);
+    let service = hyper::service::service_fn(|_req: Request<Body>| {
+        println!("Got request");
+        let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
+        let body = Body::from(buffer);

-        let response = http::Response::builder()
-            .status(http::StatusCode::OK)
-            .body(())
-            .unwrap();
+        let response = Response::builder()
+            .status(http::StatusCode::OK)
+            .header(http::header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap();
+        future::ok::<_, Error>(response)
+    });

-        let send = respond.send_response(response, false).unwrap();
-        let data = vec![65u8; 1024*1024];
-        PipeToSendStream::new(bytes::Bytes::from(data), send).await?;
-        println!("DATA SENT");
-    }
-
-    println!("H2 connection CLOSE !");
+    http.serve_connection(socket, service)
+        .map_err(Error::from)
+        .await?;

     Ok(())
 }
|
@ -10,7 +10,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
|
|
||||||
let auth_id = Authid::root_auth_id();
|
let auth_id = Authid::root_auth_id();
|
||||||
|
|
||||||
let options = HttpClientOptions::new()
|
let options = HttpClientOptions::default()
|
||||||
.interactive(true)
|
.interactive(true)
|
||||||
.ticket_cache(true);
|
.ticket_cache(true);
|
||||||
|
|
||||||
|
@@ -1,3 +1,5 @@
+//! The Proxmox Backup Server API
+
 pub mod access;
 pub mod admin;
 pub mod backup;

@@ -9,6 +11,7 @@ pub mod types;
 pub mod version;
 pub mod ping;
 pub mod pull;
+pub mod tape;
 mod helpers;

 use proxmox::api::router::SubdirMap;

@@ -17,7 +20,7 @@ use proxmox::list_subdirs_api_method;

 const NODES_ROUTER: Router = Router::new().match_all("node", &node::ROUTER);

-pub const SUBDIRS: SubdirMap = &[
+const SUBDIRS: SubdirMap = &[
     ("access", &access::ROUTER),
     ("admin", &admin::ROUTER),
     ("backup", &backup::ROUTER),

@@ -27,6 +30,7 @@ pub const SUBDIRS: SubdirMap = &[
     ("pull", &pull::ROUTER),
     ("reader", &reader::ROUTER),
     ("status", &status::ROUTER),
+    ("tape", &tape::ROUTER),
     ("version", &version::ROUTER),
 ];
@@ -1,36 +1,52 @@
+//! Access control (Users, Permissions and Authentication)
+
 use anyhow::{bail, format_err, Error};

 use serde_json::{json, Value};
 use std::collections::HashMap;
 use std::collections::HashSet;

-use proxmox::api::{api, RpcEnvironment, Permission};
 use proxmox::api::router::{Router, SubdirMap};
-use proxmox::{sortable, identity};
+use proxmox::api::{api, Permission, RpcEnvironment};
 use proxmox::{http_err, list_subdirs_api_method};
+use proxmox::{identity, sortable};

-use crate::tools::ticket::{self, Empty, Ticket};
-use crate::auth_helpers::*;
 use crate::api2::types::*;
+use crate::auth_helpers::*;
+use crate::server::ticket::ApiTicket;
+use crate::tools::ticket::{self, Empty, Ticket};

 use crate::config::acl as acl_config;
-use crate::config::acl::{PRIVILEGES, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
+use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
 use crate::config::cached_user_info::CachedUserInfo;
+use crate::config::tfa::TfaChallenge;

-pub mod user;
-pub mod domain;
 pub mod acl;
+pub mod domain;
 pub mod role;
+pub mod tfa;
+pub mod user;

+#[allow(clippy::large_enum_variant)]
+enum AuthResult {
+    /// Successful authentication which does not require a new ticket.
+    Success,
+
+    /// Successful authentication which requires a ticket to be created.
+    CreateTicket,
+
+    /// A partial ticket which requires a 2nd factor will be created.
+    Partial(TfaChallenge),
+}
+
-/// returns Ok(true) if a ticket has to be created
-/// and Ok(false) if not
 fn authenticate_user(
     userid: &Userid,
     password: &str,
     path: Option<String>,
     privs: Option<String>,
     port: Option<u16>,
-) -> Result<bool, Error> {
+    tfa_challenge: Option<String>,
+) -> Result<AuthResult, Error> {
     let user_info = CachedUserInfo::new()?;

     let auth_id = Authid::from(userid.clone());
@@ -38,12 +54,16 @@ fn authenticate_user(
         bail!("user account disabled or expired.");
     }

+    if let Some(tfa_challenge) = tfa_challenge {
+        return authenticate_2nd(userid, &tfa_challenge, password);
+    }
+
     if password.starts_with("PBS:") {
         if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
             .and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
         {
             if *userid == ticket_userid {
-                return Ok(true);
+                return Ok(AuthResult::CreateTicket);
             }
             bail!("ticket login failed - wrong userid");
         }
@@ -53,17 +73,17 @@ fn authenticate_user(
     }

     let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
-    let privilege_name = privs
-        .ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
+    let privilege_name =
+        privs.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
     let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;

-    if let Ok(Empty) = Ticket::parse(password)
-        .and_then(|ticket| ticket.verify(
+    if let Ok(Empty) = Ticket::parse(password).and_then(|ticket| {
+        ticket.verify(
             public_auth_key(),
             ticket::TERM_PREFIX,
             Some(&ticket::term_aad(userid, &path, port)),
-        ))
-    {
+        )
+    }) {
         for (name, privilege) in PRIVILEGES {
             if *name == privilege_name {
                 let mut path_vec = Vec::new();
@@ -73,7 +93,7 @@ fn authenticate_user(
                     }
                 }
                 user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
-                return Ok(false);
+                return Ok(AuthResult::Success);
             }
         }
@@ -81,8 +101,26 @@ fn authenticate_user(
         }
     }

-    let _ = crate::auth::authenticate_user(userid, password)?;
-    Ok(true)
+    let _: () = crate::auth::authenticate_user(userid, password)?;
+
+    Ok(match crate::config::tfa::login_challenge(userid)? {
+        None => AuthResult::CreateTicket,
+        Some(challenge) => AuthResult::Partial(challenge),
+    })
+}
+
+fn authenticate_2nd(
+    userid: &Userid,
+    challenge_ticket: &str,
+    response: &str,
+) -> Result<AuthResult, Error> {
+    let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
+        .verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
+        .require_partial()?;
+
+    let _: () = crate::config::tfa::verify_challenge(userid, &challenge, response.parse()?)?;
+
+    Ok(AuthResult::CreateTicket)
 }

 #[api(
@@ -109,6 +147,11 @@ fn authenticate_user(
                 description: "Port for verifying terminal tickets.",
                 optional: true,
             },
+            "tfa-challenge": {
+                type: String,
+                description: "The signed TFA challenge string the user wants to respond to.",
+                optional: true,
+            },
         },
     },
     returns: {
@@ -123,7 +166,9 @@ fn authenticate_user(
             },
             CSRFPreventionToken: {
                 type: String,
-                description: "Cross Site Request Forgery Prevention Token.",
+                description:
+                    "Cross Site Request Forgery Prevention Token. \
+                     For partial tickets this is the string \"invalid\".",
             },
         },
     },
@@ -135,21 +180,24 @@ fn authenticate_user(
 /// Create or verify authentication ticket.
 ///
 /// Returns: An authentication ticket with additional infos.
-fn create_ticket(
+pub fn create_ticket(
     username: Userid,
     password: String,
     path: Option<String>,
     privs: Option<String>,
     port: Option<u16>,
+    tfa_challenge: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-    match authenticate_user(&username, &password, path, privs, port) {
-        Ok(true) => {
-            let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
+    match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
+        Ok(AuthResult::Success) => Ok(json!({ "username": username })),
+        Ok(AuthResult::CreateTicket) => {
+            let api_ticket = ApiTicket::full(username.clone());
+            let ticket = Ticket::new("PBS", &api_ticket)?.sign(private_auth_key(), None)?;
             let token = assemble_csrf_prevention_token(csrf_secret(), &username);

-            crate::server::rest::auth_logger()?.log(format!("successful auth for user '{}'", username));
+            crate::server::rest::auth_logger()?
+                .log(format!("successful auth for user '{}'", username));

             Ok(json!({
                 "username": username,
|
|||||||
"CSRFPreventionToken": token,
|
"CSRFPreventionToken": token,
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
Ok(false) => Ok(json!({
|
Ok(AuthResult::Partial(challenge)) => {
|
||||||
|
let api_ticket = ApiTicket::partial(challenge);
|
||||||
|
let ticket = Ticket::new("PBS", &api_ticket)?
|
||||||
|
.sign(private_auth_key(), Some(username.as_str()))?;
|
||||||
|
Ok(json!({
|
||||||
"username": username,
|
"username": username,
|
||||||
})),
|
"ticket": ticket,
|
||||||
|
"CSRFPreventionToken": "invalid",
|
||||||
|
}))
|
||||||
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
|
let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
|
||||||
Some(ip) => format!("{}", ip),
|
Some(ip) => format!("{}", ip),
|
||||||
@@ -181,6 +236,7 @@ fn create_ticket(
 }

 #[api(
+    protected: true,
     input: {
         properties: {
             userid: {
@@ -192,36 +248,42 @@ fn create_ticket(
         },
     },
     access: {
-        description: "Anybody is allowed to change there own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
+        description: "Everybody is allowed to change their own password. In addition, users with 'Permissions:Modify' privilege may change any password on @pbs realm.",
         permission: &Permission::Anybody,
     },
 )]
 /// Change user password
 ///
 /// Each user is allowed to change his own password. Superuser
 /// can change all passwords.
-fn change_password(
+pub fn change_password(
     userid: Userid,
     password: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-    let current_user: Userid = rpcenv
+    let current_auth: Authid = rpcenv
         .get_auth_id()
-        .ok_or_else(|| format_err!("unknown user"))?
+        .ok_or_else(|| format_err!("no authid available"))?
         .parse()?;
-    let current_auth = Authid::from(current_user.clone());

-    let mut allowed = userid == current_user;
+    if current_auth.is_token() {
+        bail!("API tokens cannot access this API endpoint");
+    }

-    if userid == "root@pam" { allowed = true; }
+    let current_user = current_auth.user();
+
+    let mut allowed = userid == *current_user;

     if !allowed {
         let user_info = CachedUserInfo::new()?;
         let privs = user_info.lookup_privs(&current_auth, &[]);
-        if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
+        if user_info.is_superuser(&current_auth) {
+            allowed = true;
+        }
+        if (privs & PRIV_PERMISSIONS_MODIFY) != 0 && userid.realm() != "pam" {
+            allowed = true;
+        }
     };

     if !allowed {
         bail!("you are not authorized to change the password.");
@@ -270,33 +332,26 @@ pub fn list_permissions(
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&current_auth_id, &["access"]);

-    let auth_id = if user_privs & PRIV_SYS_AUDIT == 0 {
-        match auth_id {
-            Some(auth_id) => {
-                if auth_id == current_auth_id {
-                    auth_id
-                } else if auth_id.is_token()
+    let auth_id = match auth_id {
+        Some(auth_id) if auth_id == current_auth_id => current_auth_id,
+        Some(auth_id) => {
+            if user_privs & PRIV_SYS_AUDIT != 0
+                || (auth_id.is_token()
                     && !current_auth_id.is_token()
-                    && auth_id.user() == current_auth_id.user() {
-                    auth_id
-                } else {
-                    bail!("not allowed to list permissions of {}", auth_id);
-                }
-            },
-            None => current_auth_id,
-        }
-    } else {
-        match auth_id {
-            Some(auth_id) => auth_id,
-            None => current_auth_id,
-        }
+                    && auth_id.user() == current_auth_id.user())
+            {
+                auth_id
+            } else {
+                bail!("not allowed to list permissions of {}", auth_id);
+            }
+        },
+        None => current_auth_id,
     };

 fn populate_acl_paths(
     mut paths: HashSet<String>,
     node: acl_config::AclTreeNode,
-    path: &str
+    path: &str,
 ) -> HashSet<String> {
     for (sub_path, child_node) in node.children {
         let sub_path = format!("{}/{}", path, &sub_path);
@@ -311,7 +366,7 @@ pub fn list_permissions(
             let mut paths = HashSet::new();
             paths.insert(path);
             paths
-        },
+        }
         None => {
             let mut paths = HashSet::new();
@@ -326,31 +381,35 @@ pub fn list_permissions(
             paths.insert("/system".to_string());

             paths
-        },
+        }
     };

-    let map = paths
-        .into_iter()
-        .fold(HashMap::new(), |mut map: HashMap<String, HashMap<String, bool>>, path: String| {
+    let map = paths.into_iter().fold(
+        HashMap::new(),
+        |mut map: HashMap<String, HashMap<String, bool>>, path: String| {
             let split_path = acl_config::split_acl_path(path.as_str());
             let (privs, propagated_privs) = user_info.lookup_privs_details(&auth_id, &split_path);

             match privs {
                 0 => map, // Don't leak ACL paths where we don't have any privileges
                 _ => {
-                    let priv_map = PRIVILEGES
-                        .iter()
-                        .fold(HashMap::new(), |mut priv_map, (name, value)| {
-                            if value & privs != 0 {
-                                priv_map.insert(name.to_string(), value & propagated_privs != 0);
-                            }
-                            priv_map
-                        });
+                    let priv_map =
+                        PRIVILEGES
+                            .iter()
+                            .fold(HashMap::new(), |mut priv_map, (name, value)| {
+                                if value & privs != 0 {
+                                    priv_map
+                                        .insert(name.to_string(), value & propagated_privs != 0);
+                                }
+                                priv_map
+                            });

                     map.insert(path, priv_map);
                     map
+                }
+            }
         },
-    }});
+    );

     Ok(map)
 }
@@ -358,21 +417,16 @@ pub fn list_permissions(
 #[sortable]
 const SUBDIRS: SubdirMap = &sorted!([
     ("acl", &acl::ROUTER),
+    ("password", &Router::new().put(&API_METHOD_CHANGE_PASSWORD)),
     (
-        "password", &Router::new()
-            .put(&API_METHOD_CHANGE_PASSWORD)
-    ),
-    (
-        "permissions", &Router::new()
-            .get(&API_METHOD_LIST_PERMISSIONS)
-    ),
-    (
-        "ticket", &Router::new()
-            .post(&API_METHOD_CREATE_TICKET)
+        "permissions",
+        &Router::new().get(&API_METHOD_LIST_PERMISSIONS)
     ),
+    ("ticket", &Router::new().post(&API_METHOD_CREATE_TICKET)),
     ("domains", &domain::ROUTER),
     ("roles", &role::ROUTER),
     ("users", &user::ROUTER),
+    ("tfa", &tfa::ROUTER),
 ]);

 pub const ROUTER: Router = Router::new()
@@ -1,5 +1,6 @@
+//! Manage Access Control Lists
+
 use anyhow::{bail, Error};
-use ::serde::{Deserialize, Serialize};

 use proxmox::api::{api, Router, RpcEnvironment, Permission};
 use proxmox::tools::fs::open_file_locked;

@@ -9,36 +10,6 @@ use crate::config::acl;
 use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
 use crate::config::cached_user_info::CachedUserInfo;

-#[api(
-    properties: {
-        propagate: {
-            schema: ACL_PROPAGATE_SCHEMA,
-        },
-        path: {
-            schema: ACL_PATH_SCHEMA,
-        },
-        ugid_type: {
-            schema: ACL_UGID_TYPE_SCHEMA,
-        },
-        ugid: {
-            type: String,
-            description: "User or Group ID.",
-        },
-        roleid: {
-            type: Role,
-        }
-    }
-)]
-#[derive(Serialize, Deserialize)]
-/// ACL list entry.
-pub struct AclListItem {
-    path: String,
-    ugid: String,
-    ugid_type: String,
-    propagate: bool,
-    roleid: String,
-}
-
 fn extract_acl_node_data(
     node: &acl::AclTreeNode,
     path: &str,
@@ -72,7 +43,7 @@ fn extract_acl_node_data(
         }
     }
     for (group, roles) in &node.groups {
-        if let Some(_) = token_user {
+        if token_user.is_some() {
             continue;
         }
@@ -194,6 +165,7 @@ pub fn read_acl(
     },
 )]
 /// Update Access Control List (ACLs).
+#[allow(clippy::too_many_arguments)]
 pub fn update_acl(
     path: String,
     role: String,
@ -210,7 +182,7 @@ pub fn update_acl(
|
|||||||
|
|
||||||
let top_level_privs = user_info.lookup_privs(¤t_auth_id, &["access", "acl"]);
|
let top_level_privs = user_info.lookup_privs(¤t_auth_id, &["access", "acl"]);
|
||||||
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
|
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
|
||||||
if let Some(_) = group {
|
if group.is_some() {
|
||||||
bail!("Unprivileged users are not allowed to create group ACL item.");
|
bail!("Unprivileged users are not allowed to create group ACL item.");
|
||||||
}
|
}
|
||||||
|
|
||||||
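
Several hunks in this series make the same mechanical change: `if let Some(_) = x` becomes `x.is_some()`, the form suggested by clippy's `redundant_pattern_matching` lint. A tiny before/after sketch:

    fn main() {
        let group: Option<&str> = Some("admins");

        // Pattern-matching form flagged by clippy::redundant_pattern_matching:
        if let Some(_) = group {
            println!("has group (verbose form)");
        }

        // Equivalent, and what the patch series converts to:
        if group.is_some() {
            println!("has group (idiomatic form)");
        }
    }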
@@ -1,3 +1,5 @@
+//! List Authentication domains/realms
+
 use anyhow::{Error};
 
 use serde_json::{json, Value};
@@ -1,3 +1,5 @@
+//! Manage Roles with privileges
+
 use anyhow::Error;
 
 use serde_json::{json, Value};
@@ -46,7 +48,7 @@ fn list_roles() -> Result<Value, Error> {
     let mut priv_list = Vec::new();
     for (name, privilege) in PRIVILEGES.iter() {
         if privs & privilege > 0 {
-            priv_list.push(name.clone());
+            priv_list.push(name);
         }
     }
     list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
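
`list_roles` treats each privilege as one bit in an integer, so membership is a single AND. A self-contained sketch of that check (the constants are stand-ins, not the real PRIV_* values):

    // Hypothetical privilege bits mirroring the PRIV_* constants used above.
    const PRIV_SYS_AUDIT: u64 = 1 << 0;
    const PRIV_PERMISSIONS_MODIFY: u64 = 1 << 1;

    const PRIVILEGES: &[(&str, u64)] = &[
        ("Sys.Audit", PRIV_SYS_AUDIT),
        ("Permissions.Modify", PRIV_PERMISSIONS_MODIFY),
    ];

    fn main() {
        let privs: u64 = PRIV_SYS_AUDIT; // privileges granted on some path

        // Same test as list_roles(): a role holds a privilege iff the bit is set.
        let names: Vec<&str> = PRIVILEGES
            .iter()
            .copied()
            .filter(|(_, bit)| privs & bit > 0)
            .map(|(name, _)| name)
            .collect();
        assert_eq!(names, ["Sys.Audit"]);
    }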
src/api2/access/tfa.rs (new file)
@@ -0,0 +1,594 @@
+//! Two Factor Authentication
+
+use anyhow::{bail, format_err, Error};
+use serde::{Deserialize, Serialize};
+
+use proxmox::api::{api, Permission, Router, RpcEnvironment};
+use proxmox::tools::tfa::totp::Totp;
+use proxmox::{http_bail, http_err};
+
+use crate::api2::types::{Authid, Userid, PASSWORD_SCHEMA};
+use crate::config::acl::{PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
+use crate::config::cached_user_info::CachedUserInfo;
+use crate::config::tfa::{TfaInfo, TfaUserData};
+
+/// Perform first-factor (password) authentication only. Ignore password for the root user.
+/// Otherwise check the current user's password.
+///
+/// This means that user admins need to type in their own password while editing a user, and
+/// regular users, which can only change their own TFA settings (checked at the API level), can
+/// change their own settings using their own password.
+fn tfa_update_auth(
+    rpcenv: &mut dyn RpcEnvironment,
+    userid: &Userid,
+    password: Option<String>,
+    must_exist: bool,
+) -> Result<(), Error> {
+    let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+    if authid.user() != Userid::root_userid() {
+        let password = password.ok_or_else(|| http_err!(UNAUTHORIZED, "missing password"))?;
+        let _: () = crate::auth::authenticate_user(authid.user(), &password)
+            .map_err(|err| http_err!(UNAUTHORIZED, "{}", err))?;
+    }
+
+    // After authentication, verify that the to-be-modified user actually exists:
+    if must_exist && authid.user() != userid {
+        let (config, _digest) = crate::config::user::config()?;
+
+        if config
+            .lookup::<crate::config::user::User>("user", userid.as_str())
+            .is_err()
+        {
+            http_bail!(UNAUTHORIZED, "user '{}' does not exist.", userid);
+        }
+    }
+
+    Ok(())
+}
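
The control flow of `tfa_update_auth` is easier to see without the API plumbing. A hypothetical, dependency-free restatement of just the decision logic (the user names and the password callback are made up for the example):

    // Root skips the password check; everyone else must re-authenticate.
    fn tfa_update_auth(
        current_user: &str,
        password: Option<&str>,
        check_password: impl Fn(&str, &str) -> bool,
    ) -> Result<(), String> {
        if current_user != "root@pam" {
            let password = password.ok_or("missing password")?;
            if !check_password(current_user, password) {
                return Err("authentication failed".into());
            }
        }
        Ok(())
    }

    fn main() {
        let check = |user: &str, pw: &str| user == "alice@pbs" && pw == "secret";
        assert!(tfa_update_auth("root@pam", None, check).is_ok());
        assert!(tfa_update_auth("alice@pbs", Some("secret"), check).is_ok());
        assert!(tfa_update_auth("alice@pbs", None, check).is_err());
    }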
+
+#[api]
+/// A TFA entry type.
+#[derive(Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+enum TfaType {
+    /// A TOTP entry type.
+    Totp,
+    /// A U2F token entry.
+    U2f,
+    /// A Webauthn token entry.
+    Webauthn,
+    /// Recovery tokens.
+    Recovery,
+}
+
+#[api(
+    properties: {
+        type: { type: TfaType },
+        info: { type: TfaInfo },
+    },
+)]
+/// A TFA entry for a user.
+#[derive(Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+struct TypedTfaInfo {
+    #[serde(rename = "type")]
+    pub ty: TfaType,
+
+    #[serde(flatten)]
+    pub info: TfaInfo,
+}
+
+fn to_data(data: TfaUserData) -> Vec<TypedTfaInfo> {
+    let mut out = Vec::with_capacity(
+        data.totp.len()
+            + data.u2f.len()
+            + data.webauthn.len()
+            + if data.recovery().is_some() { 1 } else { 0 },
+    );
+    if let Some(recovery) = data.recovery() {
+        out.push(TypedTfaInfo {
+            ty: TfaType::Recovery,
+            info: TfaInfo::recovery(recovery.created),
+        })
+    }
+    for entry in data.totp {
+        out.push(TypedTfaInfo {
+            ty: TfaType::Totp,
+            info: entry.info,
+        });
+    }
+    for entry in data.webauthn {
+        out.push(TypedTfaInfo {
+            ty: TfaType::Webauthn,
+            info: entry.info,
+        });
+    }
+    for entry in data.u2f {
+        out.push(TypedTfaInfo {
+            ty: TfaType::U2f,
+            info: entry.info,
+        });
+    }
+    out
+}
+
+/// Iterate through tuples of `(type, index, id)`.
+fn tfa_id_iter(data: &TfaUserData) -> impl Iterator<Item = (TfaType, usize, &str)> {
+    data.totp
+        .iter()
+        .enumerate()
+        .map(|(i, entry)| (TfaType::Totp, i, entry.info.id.as_str()))
+        .chain(
+            data.webauthn
+                .iter()
+                .enumerate()
+                .map(|(i, entry)| (TfaType::Webauthn, i, entry.info.id.as_str())),
+        )
+        .chain(
+            data.u2f
+                .iter()
+                .enumerate()
+                .map(|(i, entry)| (TfaType::U2f, i, entry.info.id.as_str())),
+        )
+        .chain(
+            data.recovery
+                .iter()
+                .map(|_| (TfaType::Recovery, 0, "recovery")),
+        )
+}
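
`tfa_id_iter` flattens several typed lists into one `(type, index, id)` stream by chaining enumerated iterators; the per-kind index is what later lets `delete_tfa` call `remove(index)` on the right `Vec`. A self-contained sketch of the same pattern with hypothetical types:

    #[derive(Debug, PartialEq, Clone, Copy)]
    enum Kind {
        Totp,
        Webauthn,
    }

    fn id_iter<'a>(
        totp: &'a [String],
        webauthn: &'a [String],
    ) -> impl Iterator<Item = (Kind, usize, &'a str)> {
        totp.iter()
            .enumerate()
            .map(|(i, id)| (Kind::Totp, i, id.as_str()))
            .chain(
                webauthn
                    .iter()
                    .enumerate()
                    .map(|(i, id)| (Kind::Webauthn, i, id.as_str())),
            )
    }

    fn main() {
        let totp = vec!["t1".to_string()];
        let webauthn = vec!["w1".to_string(), "w2".to_string()];
        let all: Vec<_> = id_iter(&totp, &webauthn).collect();
        // Indices restart per kind, which is what lets the caller remove
        // the entry from the right Vec later.
        assert_eq!(all[2], (Kind::Webauthn, 1, "w2"));
    }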
+
+#[api(
+    protected: true,
+    input: {
+        properties: { userid: { type: Userid } },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// List TFA entries for a given user.
+fn list_user_tfa(userid: Userid) -> Result<Vec<TypedTfaInfo>, Error> {
+    let _lock = crate::config::tfa::read_lock()?;
+
+    Ok(match crate::config::tfa::read()?.users.remove(&userid) {
+        Some(data) => to_data(data),
+        None => Vec::new(),
+    })
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: { type: Userid },
+            id: { description: "the tfa entry id" }
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Get a single TFA entry.
+fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
+    let _lock = crate::config::tfa::read_lock()?;
+
+    if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
+        match {
+            // scope to prevent the temporary iter from borrowing across the whole match
+            let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
+            entry.map(|(ty, index, _)| (ty, index))
+        } {
+            Some((TfaType::Recovery, _)) => {
+                if let Some(recovery) = user_data.recovery() {
+                    return Ok(TypedTfaInfo {
+                        ty: TfaType::Recovery,
+                        info: TfaInfo::recovery(recovery.created),
+                    });
+                }
+            }
+            Some((TfaType::Totp, index)) => {
+                return Ok(TypedTfaInfo {
+                    ty: TfaType::Totp,
+                    // `into_iter().nth()` to *move* out of it
+                    info: user_data.totp.into_iter().nth(index).unwrap().info,
+                });
+            }
+            Some((TfaType::Webauthn, index)) => {
+                return Ok(TypedTfaInfo {
+                    ty: TfaType::Webauthn,
+                    info: user_data.webauthn.into_iter().nth(index).unwrap().info,
+                });
+            }
+            Some((TfaType::U2f, index)) => {
+                return Ok(TypedTfaInfo {
+                    ty: TfaType::U2f,
+                    info: user_data.u2f.into_iter().nth(index).unwrap().info,
+                });
+            }
+            None => (),
+        }
+    }
+
+    http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id);
+}
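
The odd-looking `match { ... } { ... }` block above is deliberate: computing the index inside its own block drops the iterator (and its borrow of `user_data`) before the match arms move fields out of it; inlining the iterator into the scrutinee would keep the temporary borrow alive for the whole match. A minimal sketch of the trick:

    fn main() {
        let items = vec!["a".to_string(), "b".to_string()];
        let wanted = "b";

        match {
            // `iter()` borrows `items`; the borrow ends with this block.
            let found = items.iter().position(|s| s.as_str() == wanted);
            found
        } {
            Some(index) => {
                // Now we may move out of `items`, e.g. via into_iter().nth().
                let owned: String = items.into_iter().nth(index).unwrap();
                assert_eq!(owned, "b");
            }
            None => println!("not found"),
        }
    }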
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: { type: Userid },
+            id: {
+                description: "the tfa entry id",
+            },
+            password: {
+                schema: PASSWORD_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Delete a single TFA entry.
+fn delete_tfa(
+    userid: Userid,
+    id: String,
+    password: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    tfa_update_auth(rpcenv, &userid, password, false)?;
+
+    let _lock = crate::config::tfa::write_lock()?;
+
+    let mut data = crate::config::tfa::read()?;
+
+    let user_data = data
+        .users
+        .get_mut(&userid)
+        .ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
+
+    match {
+        // scope to prevent the temporary iter from borrowing across the whole match
+        let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
+        entry.map(|(ty, index, _)| (ty, index))
+    } {
+        Some((TfaType::Recovery, _)) => user_data.recovery = None,
+        Some((TfaType::Totp, index)) => drop(user_data.totp.remove(index)),
+        Some((TfaType::Webauthn, index)) => drop(user_data.webauthn.remove(index)),
+        Some((TfaType::U2f, index)) => drop(user_data.u2f.remove(index)),
+        None => http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id),
+    }
+
+    if user_data.is_empty() {
+        data.users.remove(&userid);
+    }
+
+    crate::config::tfa::write(&data)?;
+
+    Ok(())
+}
+
+#[api(
+    properties: {
+        "userid": { type: Userid },
+        "entries": {
+            type: Array,
+            items: { type: TypedTfaInfo },
+        },
+    },
+)]
+#[derive(Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+/// Over the API we only provide the descriptions for TFA data.
+struct TfaUser {
+    /// The user this entry belongs to.
+    userid: Userid,
+
+    /// TFA entries.
+    entries: Vec<TypedTfaInfo>,
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {},
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Returns all or just the logged-in user, depending on privileges.",
+    },
+    returns: {
+        description: "A list of users and their TFA entries.",
+        type: Array,
+        items: { type: TfaUser }
+    },
+)]
+/// List user TFA configuration.
+fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
+    let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let top_level_privs = user_info.lookup_privs(&authid, &["access", "users"]);
+    let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
+
+    let _lock = crate::config::tfa::read_lock()?;
+    let tfa_data = crate::config::tfa::read()?.users;
+
+    let mut out = Vec::<TfaUser>::new();
+    if top_level_allowed {
+        for (user, data) in tfa_data {
+            out.push(TfaUser {
+                userid: user,
+                entries: to_data(data),
+            });
+        }
+    } else if let Some(data) = { tfa_data }.remove(authid.user()) {
+        out.push(TfaUser {
+            userid: authid.into(),
+            entries: to_data(data),
+        });
+    }
+
+    Ok(out)
+}
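
`list_tfa` uses another small trick: `{ tfa_data }.remove(..)` moves the non-`mut` binding into a temporary, and the single `remove` call may mutably borrow that temporary. Sketch:

    use std::collections::HashMap;

    fn main() {
        let map: HashMap<String, u32> = HashMap::from([("alice".to_string(), 1)]);

        // `map` itself is not `mut`; the block moves it into a temporary that
        // can be mutably borrowed for the one `remove` call.
        let taken = { map }.remove("alice");
        assert_eq!(taken, Some(1));
    }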
+
+#[api(
+    properties: {
+        recovery: {
+            description: "A list of recovery codes as integers.",
+            type: Array,
+            items: {
+                type: Integer,
+                description: "A one-time usable recovery code entry.",
+            },
+        },
+    },
+)]
+/// The result returned when adding TFA entries to a user.
+#[derive(Default, Serialize)]
+struct TfaUpdateInfo {
+    /// The id of a newly added TFA entry.
+    id: Option<String>,
+
+    /// When adding u2f entries, this contains a challenge the user must respond to in order to
+    /// finish the registration.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    challenge: Option<String>,
+
+    /// When adding recovery codes, this contains the list of codes to be displayed to the user
+    /// this one time.
+    #[serde(skip_serializing_if = "Vec::is_empty", default)]
+    recovery: Vec<String>,
+}
+
+impl TfaUpdateInfo {
+    fn id(id: String) -> Self {
+        Self {
+            id: Some(id),
+            ..Default::default()
+        }
+    }
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: { type: Userid },
+            description: {
+                description: "A description to distinguish multiple entries from one another",
+                type: String,
+                max_length: 255,
+                optional: true,
+            },
+            "type": { type: TfaType },
+            totp: {
+                description: "A totp URI.",
+                optional: true,
+            },
+            value: {
+                description:
+                    "The current value for the provided totp URI, or a Webauthn/U2F challenge response",
+                optional: true,
+            },
+            challenge: {
+                description: "When responding to a u2f challenge: the original challenge string",
+                optional: true,
+            },
+            password: {
+                schema: PASSWORD_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: { type: TfaUpdateInfo },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Add a TFA entry to the user.
+#[allow(clippy::too_many_arguments)]
+fn add_tfa_entry(
+    userid: Userid,
+    description: Option<String>,
+    totp: Option<String>,
+    value: Option<String>,
+    challenge: Option<String>,
+    password: Option<String>,
+    r#type: TfaType,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<TfaUpdateInfo, Error> {
+    tfa_update_auth(rpcenv, &userid, password, true)?;
+
+    let need_description =
+        move || description.ok_or_else(|| format_err!("'description' is required for new entries"));
+
+    match r#type {
+        TfaType::Totp => match (totp, value) {
+            (Some(totp), Some(value)) => {
+                if challenge.is_some() {
+                    bail!("'challenge' parameter is invalid for 'totp' entries");
+                }
+                let description = need_description()?;
+
+                let totp: Totp = totp.parse()?;
+                if totp
+                    .verify(&value, std::time::SystemTime::now(), -1..=1)?
+                    .is_none()
+                {
+                    bail!("failed to verify TOTP challenge");
+                }
+                crate::config::tfa::add_totp(&userid, description, totp).map(TfaUpdateInfo::id)
+            }
+            _ => bail!("'totp' type requires both 'totp' and 'value' parameters"),
+        },
+        TfaType::Webauthn => {
+            if totp.is_some() {
+                bail!("'totp' parameter is invalid for 'webauthn' entries");
+            }
+
+            match challenge {
+                None => crate::config::tfa::add_webauthn_registration(&userid, need_description()?)
+                    .map(|c| TfaUpdateInfo {
+                        challenge: Some(c),
+                        ..Default::default()
+                    }),
+                Some(challenge) => {
+                    let value = value.ok_or_else(|| {
+                        format_err!(
+                            "missing 'value' parameter (webauthn challenge response missing)"
+                        )
+                    })?;
+                    crate::config::tfa::finish_webauthn_registration(&userid, &challenge, &value)
+                        .map(TfaUpdateInfo::id)
+                }
+            }
+        }
+        TfaType::U2f => {
+            if totp.is_some() {
+                bail!("'totp' parameter is invalid for 'u2f' entries");
+            }
+
+            match challenge {
+                None => crate::config::tfa::add_u2f_registration(&userid, need_description()?).map(
+                    |c| TfaUpdateInfo {
+                        challenge: Some(c),
+                        ..Default::default()
+                    },
+                ),
+                Some(challenge) => {
+                    let value = value.ok_or_else(|| {
+                        format_err!("missing 'value' parameter (u2f challenge response missing)")
+                    })?;
+                    crate::config::tfa::finish_u2f_registration(&userid, &challenge, &value)
+                        .map(TfaUpdateInfo::id)
+                }
+            }
+        }
+        TfaType::Recovery => {
+            if totp.or(value).or(challenge).is_some() {
+                bail!("generating recovery tokens does not allow additional parameters");
+            }
+
+            let recovery = crate::config::tfa::add_recovery(&userid)?;
+
+            Ok(TfaUpdateInfo {
+                id: Some("recovery".to_string()),
+                recovery,
+                ..Default::default()
+            })
+        }
+    }
+}
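
`add_tfa_entry` validates its optional parameters by matching on tuples of `Option`s, which keeps the "both or neither" rule in one place. A reduced sketch of the TOTP case:

    // A TOTP entry needs both `totp` and `value`; anything else is rejected.
    fn validate_totp(
        totp: Option<&str>,
        value: Option<&str>,
    ) -> Result<(String, String), String> {
        match (totp, value) {
            (Some(totp), Some(value)) => Ok((totp.to_string(), value.to_string())),
            _ => Err("'totp' type requires both 'totp' and 'value' parameters".into()),
        }
    }

    fn main() {
        assert!(validate_totp(Some("otpauth://..."), Some("123456")).is_ok());
        assert!(validate_totp(Some("otpauth://..."), None).is_err());
    }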
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            userid: { type: Userid },
+            id: {
+                description: "the tfa entry id",
+            },
+            description: {
+                description: "A description to distinguish multiple entries from one another",
+                type: String,
+                max_length: 255,
+                optional: true,
+            },
+            enable: {
+                description: "Whether this entry should currently be enabled or disabled",
+                optional: true,
+            },
+            password: {
+                schema: PASSWORD_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Or(&[
+            &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
+            &Permission::UserParam("userid"),
+        ]),
+    },
+)]
+/// Update user's TFA entry description.
+fn update_tfa_entry(
+    userid: Userid,
+    id: String,
+    description: Option<String>,
+    enable: Option<bool>,
+    password: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    tfa_update_auth(rpcenv, &userid, password, true)?;
+
+    let _lock = crate::config::tfa::write_lock()?;
+
+    let mut data = crate::config::tfa::read()?;
+
+    let mut entry = data
+        .users
+        .get_mut(&userid)
+        .and_then(|user| user.find_entry_mut(&id))
+        .ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
+
+    if let Some(description) = description {
+        entry.description = description;
+    }
+
+    if let Some(enable) = enable {
+        entry.enable = enable;
+    }
+
+    crate::config::tfa::write(&data)?;
+    Ok(())
+}
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_TFA)
+    .match_all("userid", &USER_ROUTER);
+
+const USER_ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_USER_TFA)
+    .post(&API_METHOD_ADD_TFA_ENTRY)
+    .match_all("id", &ITEM_ROUTER);
+
+const ITEM_ROUTER: Router = Router::new()
+    .get(&API_METHOD_GET_TFA_ENTRY)
+    .put(&API_METHOD_UPDATE_TFA_ENTRY)
+    .delete(&API_METHOD_DELETE_TFA);
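
Reading the three routers together, the URL layout this appears to yield (relative to where the module is mounted, `access/tfa` per the SUBDIRS change above) is: GET tfa (list all users' entries), GET/POST tfa/{userid} (list or add a user's entries), and GET/PUT/DELETE tfa/{userid}/{id} (inspect, update, or delete one entry).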
@@ -1,4 +1,6 @@
-use anyhow::{bail, Error};
+//! User Management
+
+use anyhow::{bail, format_err, Error};
 use serde::{Serialize, Deserialize};
 use serde_json::{json, Value};
 use std::collections::HashMap;
@@ -94,7 +96,6 @@ impl UserWithTokens {
     }
 }
 
-
 #[api(
     input: {
         properties: {
@@ -113,7 +114,7 @@ impl UserWithTokens {
     },
     access: {
         permission: &Permission::Anybody,
-        description: "Returns all or just the logged-in user, depending on privileges.",
+        description: "Returns all or just the logged-in user (/API token owner), depending on privileges.",
     },
 )]
 /// List users
@@ -125,9 +126,12 @@ pub fn list_users(
 
     let (config, digest) = user::config()?;
 
-    // intentionally user only for now
-    let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
-    let auth_id = Authid::from(userid.clone());
+    let auth_id: Authid = rpcenv
+        .get_auth_id()
+        .ok_or_else(|| format_err!("no authid available"))?
+        .parse()?;
+
+    let userid = auth_id.user();
 
     let user_info = CachedUserInfo::new()?;
 
@@ -135,7 +139,7 @@ pub fn list_users(
     let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
 
     let filter_by_privs = |user: &user::User| {
-        top_level_allowed || user.userid == userid
+        top_level_allowed || user.userid == *userid
     };
 
 
@@ -167,7 +171,7 @@ pub fn list_users(
             })
            .collect()
     } else {
-        iter.map(|user: user::User| UserWithTokens::new(user))
+        iter.map(UserWithTokens::new)
            .collect()
     };
 
@@ -216,7 +220,11 @@ pub fn list_users(
     },
 )]
 /// Create new user.
-pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
+pub fn create_user(
+    password: Option<String>,
+    param: Value,
+    rpcenv: &mut dyn RpcEnvironment
+) -> Result<(), Error> {
 
     let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
@@ -224,17 +232,25 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
 
     let (mut config, _digest) = user::config()?;
 
-    if let Some(_) = config.sections.get(user.userid.as_str()) {
+    if config.sections.get(user.userid.as_str()).is_some() {
         bail!("user '{}' already exists.", user.userid);
     }
 
-    let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;
-
     config.set_data(user.userid.as_str(), "user", &user)?;
 
+    let realm = user.userid.realm();
+
+    // Fails if realm does not exist!
+    let authenticator = crate::auth::lookup_authenticator(realm)?;
+
     user::save_config(&config)?;
 
     if let Some(password) = password {
+        let user_info = CachedUserInfo::new()?;
+        let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+        if realm == "pam" && !user_info.is_superuser(&current_auth_id) {
+            bail!("only superuser can edit pam credentials!");
+        }
         authenticator.store_password(user.userid.name(), &password)?;
     }
 
@@ -249,10 +265,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
         },
     },
     },
-    returns: {
-        description: "The user configuration (with config digest).",
-        type: user::User,
-    },
+    returns: { type: user::User },
     access: {
         permission: &Permission::Or(&[
             &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
@@ -340,6 +353,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update user configuration.
+#[allow(clippy::too_many_arguments)]
 pub fn update_user(
     userid: Userid,
     comment: Option<String>,
@@ -351,6 +365,7 @@ pub fn update_user(
     email: Option<String>,
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
 
     let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
@@ -393,6 +408,13 @@ pub fn update_user(
     }
 
     if let Some(password) = password {
+        let user_info = CachedUserInfo::new()?;
+        let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+        let self_service = current_auth_id.user() == &userid;
+        let target_realm = userid.realm();
+        if !self_service && target_realm == "pam" && !user_info.is_superuser(&current_auth_id) {
+            bail!("only superuser can edit pam credentials!");
+        }
         let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
         authenticator.store_password(userid.name(), &password)?;
     }
@@ -438,6 +460,7 @@ pub fn update_user(
 /// Remove a user from the configuration file.
 pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
 
+    let _tfa_lock = crate::config::tfa::write_lock()?;
     let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
 
     let (mut config, expected_digest) = user::config()?;
@@ -454,6 +477,19 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
 
     user::save_config(&config)?;
 
+    match crate::config::tfa::read().and_then(|mut cfg| {
+        let _: bool = cfg.remove_user(&userid);
+        crate::config::tfa::write(&cfg)
+    }) {
+        Ok(()) => (),
+        Err(err) => {
+            eprintln!(
+                "error updating TFA config after deleting user {:?}: {}",
+                userid, err
+            );
+        }
+    }
+
     Ok(())
 }
 
@@ -468,10 +504,7 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
         },
     },
     },
-    returns: {
-        description: "Get API token metadata (with config digest).",
-        type: user::ApiToken,
-    },
+    returns: { type: user::ApiToken },
     access: {
         permission: &Permission::Or(&[
            &Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
@@ -565,7 +598,7 @@ pub fn generate_token(
     let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
     let tokenid_string = tokenid.to_string();
 
-    if let Some(_) = config.sections.get(&tokenid_string) {
+    if config.sections.get(&tokenid_string).is_some() {
         bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
     }
 
@@ -573,7 +606,7 @@ pub fn generate_token(
     token_shadow::set_secret(&tokenid, &secret)?;
 
     let token = user::ApiToken {
-        tokenid: tokenid.clone(),
+        tokenid,
         comment,
         enable,
         expire,
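
`delete_user` now treats the TFA cleanup as best-effort: the user is already gone from the main config, so a failure to update the TFA file is logged rather than propagated. A dependency-free sketch of that pattern (the helper names are hypothetical):

    fn remove_tfa_for_user(userid: &str) -> Result<(), String> {
        if userid.is_empty() {
            return Err("bad userid".into());
        }
        Ok(())
    }

    fn delete_user(userid: &str) -> Result<(), String> {
        // ... delete the user from the main config first ...

        match remove_tfa_for_user(userid) {
            Ok(()) => (),
            Err(err) => {
                // Log and continue; do not fail the whole deletion.
                eprintln!("error updating TFA config after deleting user {:?}: {}", userid, err);
            }
        }

        Ok(())
    }

    fn main() {
        assert!(delete_user("alice@pbs").is_ok());
    }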
@@ -1,3 +1,5 @@
+//! Backup Server Administration
+
 use proxmox::api::router::{Router, SubdirMap};
 use proxmox::list_subdirs_api_method;
 
@@ -1,7 +1,8 @@
+//! Datastore Management
+
 use std::collections::HashSet;
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
-use std::sync::{Arc, Mutex};
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
 
@@ -10,12 +11,13 @@ use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
 use serde_json::{json, Value};
+use tokio_stream::wrappers::ReceiverStream;
 
 use proxmox::api::{
     api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
     RpcEnvironment, RpcEnvironmentType, Permission
 };
-use proxmox::api::router::SubdirMap;
+use proxmox::api::router::{ReturnType, SubdirMap};
 use proxmox::api::schema::*;
 use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
@@ -148,7 +150,7 @@ fn get_all_snapshot_files(
     },
 )]
 /// List backup groups.
-fn list_groups(
+pub fn list_groups(
     store: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {
@@ -298,7 +300,7 @@ pub fn list_snapshot_files(
     },
 )]
 /// Delete backup snapshot.
-fn delete_snapshot(
+pub fn delete_snapshot(
     store: String,
     backup_type: String,
     backup_id: String,
@@ -439,8 +441,8 @@ pub fn list_snapshots (
             let files = info
                 .files
                 .into_iter()
-                .map(|x| BackupContent {
-                    filename: x.to_string(),
+                .map(|filename| BackupContent {
+                    filename,
                     size: None,
                     crypt_mode: None,
                 })
@@ -661,25 +663,20 @@ pub fn verify(
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }
 
-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
     let upid_str = WorkerTask::new_thread(
         worker_type,
-        Some(worker_id.clone()),
+        Some(worker_id),
         auth_id.clone(),
         to_stdout,
         move |worker| {
-            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
-            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
 
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();
                 if !verify_backup_dir(
-                    datastore,
+                    &verify_worker,
                     &backup_dir,
-                    verified_chunks,
-                    corrupt_chunks,
-                    worker.clone(),
                     worker.upid().clone(),
                     None,
                 )? {
@@ -687,13 +684,10 @@ pub fn verify(
                 }
                 res
             } else if let Some(backup_group) = backup_group {
-                let (_count, failed_dirs) = verify_backup_group(
-                    datastore,
+                let failed_dirs = verify_backup_group(
+                    &verify_worker,
                     &backup_group,
-                    verified_chunks,
-                    corrupt_chunks,
-                    None,
-                    worker.clone(),
+                    &mut StoreProgress::new(1),
                     worker.upid(),
                     None,
                 )?;
@@ -708,9 +702,9 @@ pub fn verify(
                 None
             };
 
-            verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
+            verify_all_backups(&verify_worker, worker.upid(), owner, None)?
         };
-        if failed_dirs.len() > 0 {
+        if !failed_dirs.is_empty() {
             worker.log("Failed to verify the following snapshots/groups:");
             for dir in failed_dirs {
                 worker.log(format!("\t{}", dir));
@@ -772,7 +766,7 @@ pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     &PruneListItem::API_SCHEMA
 ).schema();
 
-const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
+pub const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
     &ApiHandler::Sync(&prune),
     &ObjectSchema::new(
         "Prune the datastore.",
@@ -787,14 +781,14 @@ const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
             ("store", false, &DATASTORE_SCHEMA),
         ])
     ))
-    .returns(&API_RETURN_SCHEMA_PRUNE)
+    .returns(ReturnType::new(false, &API_RETURN_SCHEMA_PRUNE))
     .access(None, &Permission::Privilege(
         &["datastore", "{store}"],
         PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
         true)
 );
 
-fn prune(
+pub fn prune(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -854,7 +848,7 @@ fn prune(
 
 
     // We use a WorkerTask just to have a task log, but run synchronously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;
 
     if keep_all {
         worker.log("No prune selection - keeping all files.");
@@ -922,7 +916,7 @@ fn prune(
     },
 )]
 /// Start garbage collection.
-fn start_garbage_collection(
+pub fn start_garbage_collection(
     store: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -934,7 +928,7 @@ fn start_garbage_collection(
     let job = Job::new("garbage_collection", &store)
         .map_err(|_| format_err!("garbage collection already running"))?;
 
-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
     let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
         .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
@@ -975,17 +969,14 @@ pub fn garbage_collection_status(
     returns: {
         description: "List the accessible datastores.",
         type: Array,
-        items: {
-            description: "Datastore name and description.",
-            type: DataStoreListItem,
-        },
+        items: { type: DataStoreListItem },
     },
     access: {
         permission: &Permission::Anybody,
     },
 )]
 /// Datastore list
-fn get_datastore_list(
+pub fn get_datastore_list(
     _param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1011,7 +1002,7 @@ fn get_datastore_list(
         }
     }
 
-    Ok(list.into())
+    Ok(list)
 }
 
 #[sortable]
@@ -1033,7 +1024,7 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
         true)
 );
 
-fn download_file(
+pub fn download_file(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -1068,7 +1059,7 @@ fn download_file(
             .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
 
         let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+            .map_ok(|bytes| bytes.freeze())
             .map_err(move |err| {
                 eprintln!("error during streaming of '{:?}' - {}", &path, err);
                 err
@@ -1103,7 +1094,7 @@ pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
         true)
 );
 
-fn download_file_decoded(
+pub fn download_file_decoded(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -1217,7 +1208,7 @@ pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
     &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
 );
 
-fn upload_backup_log(
+pub fn upload_backup_log(
     _parts: Parts,
     req_body: Body,
     param: Value,
@@ -1296,14 +1287,12 @@ fn upload_backup_log(
     },
 )]
 /// Get the entries of the given path of the catalog
-fn catalog(
+pub fn catalog(
     store: String,
     backup_type: String,
     backup_id: String,
     backup_time: i64,
     filepath: String,
-    _param: Value,
-    _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
@@ -1343,10 +1332,10 @@ fn catalog(
 
     if filepath != "root" {
         components = base64::decode(filepath)?;
-        if components.len() > 0 && components[0] == '/' as u8 {
+        if !components.is_empty() && components[0] == b'/' {
             components.remove(0);
         }
-        for component in components.split(|c| *c == '/' as u8) {
+        for component in components.split(|c| *c == b'/') {
             if let Some(entry) = catalog_reader.lookup(&current, component)? {
                 current = entry;
             } else {
@@ -1359,7 +1348,7 @@ fn catalog(
 
     for direntry in catalog_reader.read_dir(&current)? {
         let mut components = components.clone();
-        components.push('/' as u8);
+        components.push(b'/');
         components.extend(&direntry.name);
         let path = base64::encode(components);
         let text = String::from_utf8_lossy(&direntry.name);
@@ -1464,7 +1453,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
         true)
 );
 
-fn pxar_file_download(
+pub fn pxar_file_download(
     _parts: Parts,
     _req_body: Body,
     param: Value,
@@ -1489,13 +1478,13 @@ fn pxar_file_download(
         check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
 
         let mut components = base64::decode(&filepath)?;
-        if components.len() > 0 && components[0] == '/' as u8 {
+        if !components.is_empty() && components[0] == b'/' {
             components.remove(0);
         }
 
-        let mut split = components.splitn(2, |c| *c == '/' as u8);
+        let mut split = components.splitn(2, |c| *c == b'/');
         let pxar_name = std::str::from_utf8(split.next().unwrap())?;
-        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+        let file_path = split.next().ok_or_else(|| format_err!("filepath looks strange '{}'", filepath))?;
         let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
         for file in files {
             if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
@@ -1522,7 +1511,7 @@ fn pxar_file_download(
         let root = decoder.open_root().await?;
         let file = root
             .lookup(OsStr::from_bytes(file_path)).await?
-            .ok_or(format_err!("error opening '{:?}'", file_path))?;
+            .ok_or_else(|| format_err!("error opening '{:?}'", file_path))?;
 
         let body = match file.kind() {
             EntryKind::File { .. } => Body::wrap_stream(
@@ -1565,7 +1554,7 @@ fn pxar_file_download(
                 .map_err(|err| eprintln!("error during finishing of zip: {}", err))
         });
 
-        Body::wrap_stream(receiver.map_err(move |err| {
+        Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
             eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
             err
         }))
@@ -1601,7 +1590,7 @@ fn pxar_file_download(
     },
 )]
 /// Read datastore stats
-fn get_rrd_stats(
+pub fn get_rrd_stats(
     store: String,
     timeframe: RRDTimeFrameResolution,
     cf: RRDMode,
@@ -1643,7 +1632,7 @@ fn get_rrd_stats(
     },
 )]
 /// Get "notes" for a specific backup
-fn get_notes(
+pub fn get_notes(
     store: String,
     backup_type: String,
     backup_id: String,
@@ -1693,7 +1682,7 @@ fn get_notes(
     },
 )]
 /// Set "notes" for a specific backup
-fn set_notes(
+pub fn set_notes(
     store: String,
     backup_type: String,
     backup_id: String,
@@ -1738,7 +1727,7 @@ fn set_notes(
     },
 )]
 /// Change owner of a backup group
-fn set_backup_owner(
+pub fn set_backup_owner(
     store: String,
     backup_type: String,
     backup_id: String,
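
`catalog` and `pxar_file_download` work on base64-decoded paths as raw bytes, and this series swaps `'/' as u8` for the clearer byte literal `b'/'`. A self-contained sketch of the strip-and-split handling:

    fn main() {
        let mut components: Vec<u8> = b"/etc/hosts".to_vec();

        // Strip a leading b'/', then split the remaining components on b'/'.
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let parts: Vec<&[u8]> = components.split(|c| *c == b'/').collect();
        assert_eq!(parts, vec![&b"etc"[..], &b"hosts"[..]]);
    }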
@@ -1,3 +1,5 @@
+//! Datastore Synchronization Job Management
+
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
@@ -58,7 +60,7 @@ pub fn list_sync_jobs(
         }
     })
     .filter(|job: &SyncJobStatus| {
-        let as_config: SyncJobConfig = job.clone().into();
+        let as_config: SyncJobConfig = job.into();
         check_sync_job_read_access(&user_info, &auth_id, &as_config)
     }).collect();
 
@@ -81,13 +83,13 @@ pub fn list_sync_jobs(
         job.last_run_state = state;
         job.last_run_endtime = endtime;
 
-        let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+        let last = job.last_run_endtime.unwrap_or(starttime);
 
         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
             let event = parse_calendar_event(&schedule).ok()?;
             // ignore errors
-            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+            compute_next_event(&event, last, false).unwrap_or(None)
         })();
     }
 
@@ -110,7 +112,7 @@ pub fn list_sync_jobs(
     },
 )]
 /// Runs the sync jobs manually.
-fn run_sync_job(
+pub fn run_sync_job(
     id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
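
The `unwrap_or_else`/`unwrap_or` changes here follow clippy's `unnecessary_lazy_evaluations` lint: when the fallback is an existing value rather than a computation, the closure only adds noise. A tiny sketch:

    fn main() {
        let endtime: Option<i64> = None;
        let starttime: i64 = 1_600_000_000;

        // Lazy form: pointless, the fallback is already a plain value.
        let verbose = endtime.unwrap_or_else(|| starttime);
        // Eager form: same result, simpler.
        let idiomatic = endtime.unwrap_or(starttime);
        assert_eq!(verbose, idiomatic);
    }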
@@ -1,3 +1,5 @@
+//! Datastore Verify Job Management
+
 use anyhow::{format_err, Error};
 
 use proxmox::api::router::SubdirMap;
@@ -86,13 +88,13 @@ pub fn list_verification_jobs(
         job.last_run_state = state;
         job.last_run_endtime = endtime;
 
-        let last = job.last_run_endtime.unwrap_or_else(|| starttime);
+        let last = job.last_run_endtime.unwrap_or(starttime);
 
         job.next_run = (|| -> Option<i64> {
             let schedule = job.schedule.as_ref()?;
             let event = parse_calendar_event(&schedule).ok()?;
             // ignore errors
-            compute_next_event(&event, last, false).unwrap_or_else(|_| None)
+            compute_next_event(&event, last, false).unwrap_or(None)
         })();
     }
 
@@ -115,7 +117,7 @@ pub fn list_verification_jobs(
     },
 )]
 /// Runs a verification job manually.
-fn run_verification_job(
+pub fn run_verification_job(
     id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1,8 +1,10 @@
+//! Backup protocol (HTTP2 upgrade)
+
 use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::header::{HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
-use hyper::{Body, Response, StatusCode};
+use hyper::{Body, Response, Request, StatusCode};
 use serde_json::{json, Value};
 
 use proxmox::{sortable, identity, list_subdirs_api_method};
@@ -138,7 +140,7 @@ async move {
         }
     };
 
-    let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
+    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
 
     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@@ -171,8 +173,7 @@ async move {
 
     let env2 = env.clone();
 
-    let mut req_fut = req_body
-        .on_upgrade()
+    let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
         .map_err(Error::from)
         .and_then(move |conn| {
             env2.debug("protocol upgrade done");
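
The `req_body.on_upgrade()` to `hyper::upgrade::on(Request::from_parts(parts, req_body))` rewrite matches hyper's 0.13 to 0.14 API change, where connection upgrades are driven through `hyper::upgrade::on` on a full `Request`; likewise, `tokio_stream::wrappers::ReceiverStream` (used in the datastore hunks above) wraps tokio 1.0's `mpsc::Receiver`, which no longer implements `Stream` itself.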
@@ -1,6 +1,6 @@
 use anyhow::{bail, format_err, Error};
 use std::sync::{Arc, Mutex};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use nix::dir::Dir;

 use ::serde::{Serialize};
@@ -185,7 +185,9 @@ impl BackupEnvironment {

     if size > data.chunk_size {
         bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
-    } else if size < data.chunk_size {
+    }
+
+    if size < data.chunk_size {
         data.small_chunk_count += 1;
         if data.small_chunk_count > 1 {
             bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)");
@@ -465,7 +467,7 @@ impl BackupEnvironment {
     state.ensure_unfinished()?;

     // test if all writer are correctly closed
-    if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
+    if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
         bail!("found open index writer - unable to finish backup");
     }

@@ -523,15 +525,11 @@ impl BackupEnvironment {
     move |worker| {
         worker.log("Automatically verifying newly added snapshot");

-        let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
-        let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
-
+        let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
         if !verify_backup_dir_with_lock(
-            datastore,
+            &verify_worker,
             &backup_dir,
-            verified_chunks,
-            corrupt_chunks,
-            worker.clone(),
             worker.upid().clone(),
             None,
             snap_lock,
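The last hunk replaces the loose `verified_chunks`/`corrupt_chunks` sets and the separate worker/datastore arguments with one `VerifyWorker` context passed by reference. A hedged sketch of what such a bundling type plausibly looks like; the field names are assumptions for illustration, not the crate's actual definition:

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the crate's real types.
struct WorkerTask;
struct DataStore;

/// Sketch of a state bundle like crate::backup::VerifyWorker: instead of
/// threading four parameters through every verify_* call, the caller builds
/// one context object and passes `&verify_worker`.
pub struct VerifyWorker {
    worker: Arc<WorkerTask>,
    datastore: Arc<DataStore>,
    // The chunk caches that were previously created at each call site:
    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
}

impl VerifyWorker {
    pub fn new(worker: Arc<WorkerTask>, datastore: Arc<DataStore>) -> Self {
        Self {
            worker,
            datastore,
            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16))),
            corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
        }
    }
}
```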
@@ -1,16 +1,28 @@
+//! Backup Server Configuration
+
 use proxmox::api::router::{Router, SubdirMap};
 use proxmox::list_subdirs_api_method;

+pub mod access;
 pub mod datastore;
 pub mod remote;
 pub mod sync;
 pub mod verify;
+pub mod drive;
+pub mod changer;
+pub mod media_pool;
+pub mod tape_encryption_keys;

 const SUBDIRS: SubdirMap = &[
+    ("access", &access::ROUTER),
+    ("changer", &changer::ROUTER),
     ("datastore", &datastore::ROUTER),
+    ("drive", &drive::ROUTER),
+    ("media-pool", &media_pool::ROUTER),
     ("remote", &remote::ROUTER),
     ("sync", &sync::ROUTER),
-    ("verify", &verify::ROUTER)
+    ("tape-encryption-keys", &tape_encryption_keys::ROUTER),
+    ("verify", &verify::ROUTER),
 ];

 pub const ROUTER: Router = Router::new()
src/api2/config/access/mod.rs (new file, 10 lines):

use proxmox::api::{Router, SubdirMap};
use proxmox::list_subdirs_api_method;

pub mod tfa;

const SUBDIRS: SubdirMap = &[("tfa", &tfa::ROUTER)];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
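This new module is the crate's minimal directory-router pattern in full: a `SubdirMap` pairs path segments with routers, `list_subdirs_api_method!` generates the GET handler that enumerates them, and `subdirs()` wires up dispatch. A sketch of extending it, where `example` is a hypothetical module added purely for illustration:

```rust
// Hypothetical extension of the SubdirMap above; `example` is not a real
// module in this change. Entries are kept in alphabetical order, matching
// the convention the other SubdirMaps in this commit series follow.
const SUBDIRS: SubdirMap = &[
    ("example", &example::ROUTER),
    ("tfa", &tfa::ROUTER),
];
```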
src/api2/config/access/tfa/mod.rs (new file, 84 lines):

//! For now this only has the TFA subdir, which is in this file.
//! If we add more, it should be moved into a sub module.

use anyhow::Error;

use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox::list_subdirs_api_method;

use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);

const SUBDIRS: SubdirMap = &[("webauthn", &WEBAUTHN_ROUTER)];

const WEBAUTHN_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_WEBAUTHN_CONFIG)
    .put(&API_METHOD_UPDATE_WEBAUTHN_CONFIG);

#[api(
    protected: true,
    input: {
        properties: {},
    },
    returns: {
        type: WebauthnConfig,
        optional: true,
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Get the TFA configuration.
pub fn get_webauthn_config(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Option<WebauthnConfig>, Error> {
    let (config, digest) = match tfa::webauthn_config()? {
        Some(c) => c,
        None => return Ok(None),
    };
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
    Ok(Some(config))
}

#[api(
    protected: true,
    input: {
        properties: {
            webauthn: {
                flatten: true,
                type: WebauthnConfigUpdater,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Update the TFA configuration.
pub fn update_webauthn_config(
    webauthn: WebauthnConfigUpdater,
    digest: Option<String>,
) -> Result<(), Error> {
    let _lock = tfa::write_lock();

    let mut tfa = tfa::read()?;

    if let Some(wa) = &mut tfa.webauthn {
        if let Some(ref digest) = digest {
            let digest = proxmox::tools::hex_to_digest(digest)?;
            crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
        }
        webauthn.apply_to(wa);
    } else {
        tfa.webauthn = Some(webauthn.build()?);
    }

    tfa::write(&tfa)?;

    Ok(())
}
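`update_webauthn_config`, like nearly every update handler in this change, uses the same optimistic-locking scheme: reads return a SHA-256 digest of the config alongside the data, and a writer may send that digest back so the update fails if someone else changed the file in between. A minimal sketch of the core comparison, with a hypothetical helper standing in for the crate's `hex_to_digest` plus `detect_modified_configuration_file` pair:

```rust
use anyhow::{bail, Error};

/// Sketch of digest-based optimistic locking: compare the digest the client
/// saw at read time against the digest of the file as it stands now.
fn check_digest(seen_at_read: &[u8; 32], current: &[u8; 32]) -> Result<(), Error> {
    if seen_at_read != current {
        // Someone else wrote the file since the client read it; force the
        // client to re-read and retry instead of silently clobbering.
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}
```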
src/api2/config/changer.rs (new file, 295 lines):

use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};
use serde_json::Value;

use proxmox::api::{
    api,
    Router,
    RpcEnvironment,
    schema::parse_property_string,
};

use crate::{
    config,
    api2::types::{
        PROXMOX_CONFIG_DIGEST_SCHEMA,
        CHANGER_NAME_SCHEMA,
        LINUX_DRIVE_PATH_SCHEMA,
        SLOT_ARRAY_SCHEMA,
        EXPORT_SLOT_LIST_SCHEMA,
        ScsiTapeChanger,
        LinuxTapeDrive,
    },
    tape::{
        linux_tape_changer_list,
        check_drive_path,
    },
};

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
            path: {
                schema: LINUX_DRIVE_PATH_SCHEMA,
            },
            "export-slots": {
                schema: EXPORT_SLOT_LIST_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Create a new changer device
pub fn create_changer(
    name: String,
    path: String,
    export_slots: Option<String>,
) -> Result<(), Error> {

    let _lock = config::drive::lock()?;

    let (mut config, _digest) = config::drive::config()?;

    let linux_changers = linux_tape_changer_list();

    check_drive_path(&linux_changers, &path)?;

    let existing: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;

    for changer in existing {
        if changer.name == name {
            bail!("Entry '{}' already exists", name);
        }

        if changer.path == path {
            bail!("Path '{}' already in use by '{}'", path, changer.name);
        }
    }

    let item = ScsiTapeChanger {
        name: name.clone(),
        path,
        export_slots,
    };

    config.set_data(&name, "changer", &item)?;

    config::drive::save_config(&config)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
        },
    },
    returns: {
        type: ScsiTapeChanger,
    },
)]
/// Get tape changer configuration
pub fn get_config(
    name: String,
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ScsiTapeChanger, Error> {

    let (config, digest) = config::drive::config()?;

    let data: ScsiTapeChanger = config.lookup("changer", &name)?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(data)
}

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "The list of configured changers (with config digest).",
        type: Array,
        items: {
            type: ScsiTapeChanger,
        },
    },
)]
/// List changers
pub fn list_changers(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ScsiTapeChanger>, Error> {

    let (config, digest) = config::drive::config()?;

    let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}
#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete export-slots.
    export_slots,
}

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
            path: {
                schema: LINUX_DRIVE_PATH_SCHEMA,
                optional: true,
            },
            "export-slots": {
                schema: EXPORT_SLOT_LIST_SCHEMA,
                optional: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                },
            },
            digest: {
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Update a tape changer configuration
pub fn update_changer(
    name: String,
    path: Option<String>,
    export_slots: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    _param: Value,
) -> Result<(), Error> {

    let _lock = config::drive::lock()?;

    let (mut config, expected_digest) = config::drive::config()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: ScsiTapeChanger = config.lookup("changer", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::export_slots => {
                    data.export_slots = None;
                }
            }
        }
    }

    if let Some(path) = path {
        let changers = linux_tape_changer_list();
        check_drive_path(&changers, &path)?;
        data.path = path;
    }

    if let Some(export_slots) = export_slots {
        let slots: Value = parse_property_string(
            &export_slots, &SLOT_ARRAY_SCHEMA
        )?;
        let mut slots: Vec<String> = slots
            .as_array()
            .unwrap()
            .iter()
            .map(|v| v.to_string())
            .collect();
        slots.sort();

        if slots.is_empty() {
            data.export_slots = None;
        } else {
            let slots = slots.join(",");
            data.export_slots = Some(slots);
        }
    }

    config.set_data(&name, "changer", &data)?;

    config::drive::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
        },
    },
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {

    let _lock = config::drive::lock()?;

    let (mut config, _digest) = config::drive::config()?;

    match config.sections.get(&name) {
        Some((section_type, _)) => {
            if section_type != "changer" {
                bail!("Entry '{}' exists, but is not a changer device", name);
            }
            config.sections.remove(&name);
        },
        None => bail!("Delete changer '{}' failed - no such entry", name),
    }

    let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
    for drive in drive_list {
        if let Some(changer) = drive.changer {
            if changer == name {
                bail!("Delete changer '{}' failed - used by drive '{}'", name, drive.name);
            }
        }
    }

    config::drive::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_CONFIG)
    .put(&API_METHOD_UPDATE_CHANGER)
    .delete(&API_METHOD_DELETE_CHANGER);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_CHANGERS)
    .post(&API_METHOD_CREATE_CHANGER)
    .match_all("name", &ITEM_ROUTER);
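The export-slots handling in `update_changer` is worth a closer look: the property string is parsed against `SLOT_ARRAY_SCHEMA` into a JSON array, each element is stringified, and the result is sorted and re-joined into canonical form. A self-contained sketch of just that normalization step, with the schema parsing stubbed out via `serde_json::json!`:

```rust
use serde_json::{json, Value};

// Sketch of the export-slots normalization above: stringify, sort, re-join.
// The unwrap() mirrors the handler, where the schema guarantees an array.
fn normalize_slots(slots: Value) -> Option<String> {
    let mut slots: Vec<String> = slots
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.to_string())
        .collect();
    slots.sort();
    if slots.is_empty() { None } else { Some(slots.join(",")) }
}

fn main() {
    // e.g. "3,1,2" parsed via the slot schema would yield json!([3, 1, 2]):
    assert_eq!(normalize_slots(json!([3, 1, 2])), Some("1,2,3".to_string()));
    assert_eq!(normalize_slots(json!([])), None);
}
```

Note the sort is lexicographic on the stringified values, exactly as in the handler.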
@@ -120,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
+    let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

     let (mut config, _digest) = datastore::config()?;

-    if let Some(_) = config.sections.get(&datastore.name) {
+    if config.sections.get(&datastore.name).is_some() {
         bail!("datastore '{}' already exists.", datastore.name);
     }

@@ -151,10 +151,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
             },
         },
     },
-    returns: {
-        description: "The datastore configuration (with config digest).",
-        type: datastore::DataStoreConfig,
-    },
+    returns: { type: datastore::DataStoreConfig },
     access: {
         permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
     },
@@ -280,6 +277,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update datastore config.
+#[allow(clippy::too_many_arguments)]
 pub fn update_datastore(
     name: String,
     comment: Option<String>,
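Dropping `.clone()` before `serde_json::from_value` recurs throughout this series: `from_value` takes the `Value` by value, so when the parameter is not used afterwards the clone is pure waste. A tiny self-contained demonstration:

```rust
use serde::Deserialize;
use serde_json::{json, Value};

#[derive(Deserialize)]
struct Config {
    name: String,
}

fn main() -> Result<(), serde_json::Error> {
    let param: Value = json!({ "name": "store1" });
    // from_value consumes the Value; since `param` is not needed again,
    // passing it directly replaces the old `param.clone()` pattern.
    let cfg: Config = serde_json::from_value(param)?;
    assert_eq!(cfg.name, "store1");
    Ok(())
}
```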
281
src/api2/config/drive.rs
Normal file
281
src/api2/config/drive.rs
Normal file
@ -0,0 +1,281 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::api::{api, Router, RpcEnvironment};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
config,
|
||||||
|
api2::types::{
|
||||||
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
DRIVE_NAME_SCHEMA,
|
||||||
|
CHANGER_NAME_SCHEMA,
|
||||||
|
CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
LinuxTapeDrive,
|
||||||
|
ScsiTapeChanger,
|
||||||
|
},
|
||||||
|
tape::{
|
||||||
|
linux_tape_device_list,
|
||||||
|
check_drive_path,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
},
|
||||||
|
changer: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"changer-drivenum": {
|
||||||
|
schema: CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new drive
|
||||||
|
pub fn create_drive(param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let item: LinuxTapeDrive = serde_json::from_value(param)?;
|
||||||
|
|
||||||
|
let linux_drives = linux_tape_device_list();
|
||||||
|
|
||||||
|
check_drive_path(&linux_drives, &item.path)?;
|
||||||
|
|
||||||
|
let existing: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
|
||||||
|
for drive in existing {
|
||||||
|
if drive.name == item.name {
|
||||||
|
bail!("Entry '{}' already exists", item.name);
|
||||||
|
}
|
||||||
|
if drive.path == item.path {
|
||||||
|
bail!("Path '{}' already used in drive '{}'", item.path, drive.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&item.name, "linux", &item)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: LinuxTapeDrive,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get drive configuration
|
||||||
|
pub fn get_config(
|
||||||
|
name: String,
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<LinuxTapeDrive, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let data: LinuxTapeDrive = config.lookup("linux", &name)?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "The list of configured drives (with config digest).",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: LinuxTapeDrive,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List drives
|
||||||
|
pub fn list_drives(
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<LinuxTapeDrive>, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(drive_list)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Deletable property name
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete the changer property.
|
||||||
|
changer,
|
||||||
|
/// Delete the changer-drivenum property.
|
||||||
|
changer_drivenum,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
changer: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"changer-drivenum": {
|
||||||
|
schema: CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update a drive configuration
|
||||||
|
pub fn update_drive(
|
||||||
|
name: String,
|
||||||
|
path: Option<String>,
|
||||||
|
changer: Option<String>,
|
||||||
|
changer_drivenum: Option<u64>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
|
digest: Option<String>,
|
||||||
|
_param: Value,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut data: LinuxTapeDrive = config.lookup("linux", &name)?;
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::changer => {
|
||||||
|
data.changer = None;
|
||||||
|
data.changer_drivenum = None;
|
||||||
|
},
|
||||||
|
DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(path) = path {
|
||||||
|
let linux_drives = linux_tape_device_list();
|
||||||
|
check_drive_path(&linux_drives, &path)?;
|
||||||
|
data.path = path;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(changer) = changer {
|
||||||
|
let _: ScsiTapeChanger = config.lookup("changer", &changer)?;
|
||||||
|
data.changer = Some(changer);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(changer_drivenum) = changer_drivenum {
|
||||||
|
if changer_drivenum == 0 {
|
||||||
|
data.changer_drivenum = None;
|
||||||
|
} else {
|
||||||
|
if data.changer.is_none() {
|
||||||
|
bail!("Option 'changer-drivenum' requires option 'changer'.");
|
||||||
|
}
|
||||||
|
data.changer_drivenum = Some(changer_drivenum);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&name, "linux", &data)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete a drive configuration
|
||||||
|
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
match config.sections.get(&name) {
|
||||||
|
Some((section_type, _)) => {
|
||||||
|
if section_type != "linux" {
|
||||||
|
bail!("Entry '{}' exists, but is not a linux tape drive", name);
|
||||||
|
}
|
||||||
|
config.sections.remove(&name);
|
||||||
|
},
|
||||||
|
None => bail!("Delete drive '{}' failed - no such drive", name),
|
||||||
|
}
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_CONFIG)
|
||||||
|
.put(&API_METHOD_UPDATE_DRIVE)
|
||||||
|
.delete(&API_METHOD_DELETE_DRIVE);
|
||||||
|
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_DRIVES)
|
||||||
|
.post(&API_METHOD_CREATE_DRIVE)
|
||||||
|
.match_all("name", &ITEM_ROUTER);
|
src/api2/config/media_pool.rs (new file, 251 lines):

use anyhow::{bail, Error};
use ::serde::{Deserialize, Serialize};

use proxmox::{
    api::{
        api,
        Router,
        RpcEnvironment,
    },
};

use crate::{
    api2::types::{
        MEDIA_POOL_NAME_SCHEMA,
        MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
        MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
        MEDIA_RETENTION_POLICY_SCHEMA,
        TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
        MediaPoolConfig,
    },
    config,
};

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            allocation: {
                schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
                optional: true,
            },
            retention: {
                schema: MEDIA_RETENTION_POLICY_SCHEMA,
                optional: true,
            },
            template: {
                schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
                optional: true,
            },
            encrypt: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
                optional: true,
            },
        },
    },
)]
/// Create a new media pool
pub fn create_pool(
    name: String,
    allocation: Option<String>,
    retention: Option<String>,
    template: Option<String>,
    encrypt: Option<String>,
) -> Result<(), Error> {

    let _lock = config::media_pool::lock()?;

    let (mut config, _digest) = config::media_pool::config()?;

    if config.sections.get(&name).is_some() {
        bail!("Media pool '{}' already exists", name);
    }

    let item = MediaPoolConfig {
        name: name.clone(),
        allocation,
        retention,
        template,
        encrypt,
    };

    config.set_data(&name, "pool", &item)?;

    config::media_pool::save_config(&config)?;

    Ok(())
}

#[api(
    returns: {
        description: "The list of configured media pools (with config digest).",
        type: Array,
        items: {
            type: MediaPoolConfig,
        },
    },
)]
/// List media pools
pub fn list_pools(
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<MediaPoolConfig>, Error> {

    let (config, digest) = config::media_pool::config()?;

    let list = config.convert_to_typed_array("pool")?;

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    input: {
        properties: {
            name: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
        },
    },
    returns: {
        type: MediaPoolConfig,
    },
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {

    let (config, _digest) = config::media_pool::config()?;

    let data: MediaPoolConfig = config.lookup("pool", &name)?;

    Ok(data)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete media set allocation policy.
    allocation,
    /// Delete pool retention policy
    retention,
    /// Delete media set naming template
    template,
    /// Delete encryption fingerprint
    encrypt,
}

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            allocation: {
                schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
                optional: true,
            },
            retention: {
                schema: MEDIA_RETENTION_POLICY_SCHEMA,
                optional: true,
            },
            template: {
                schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
                optional: true,
            },
            encrypt: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
                optional: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
        },
    },
)]
/// Update media pool settings
pub fn update_pool(
    name: String,
    allocation: Option<String>,
    retention: Option<String>,
    template: Option<String>,
    encrypt: Option<String>,
    delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {

    let _lock = config::media_pool::lock()?;

    let (mut config, _digest) = config::media_pool::config()?;

    let mut data: MediaPoolConfig = config.lookup("pool", &name)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::allocation => { data.allocation = None; },
                DeletableProperty::retention => { data.retention = None; },
                DeletableProperty::template => { data.template = None; },
                DeletableProperty::encrypt => { data.encrypt = None; },
            }
        }
    }

    if allocation.is_some() { data.allocation = allocation; }
    if retention.is_some() { data.retention = retention; }
    if template.is_some() { data.template = template; }
    if encrypt.is_some() { data.encrypt = encrypt; }

    config.set_data(&name, "pool", &data)?;

    config::media_pool::save_config(&config)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            name: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
        },
    },
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {

    let _lock = config::media_pool::lock()?;

    let (mut config, _digest) = config::media_pool::config()?;

    match config.sections.get(&name) {
        Some(_) => { config.sections.remove(&name); },
        None => bail!("delete pool '{}' failed - no such pool", name),
    }

    config::media_pool::save_config(&config)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_CONFIG)
    .put(&API_METHOD_UPDATE_POOL)
    .delete(&API_METHOD_DELETE_POOL);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_POOLS)
    .post(&API_METHOD_CREATE_POOL)
    .match_all("name", &ITEM_ROUTER);
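`update_pool` applies a consistent merge order for every property: deletions from the `delete` list run first, then any explicitly supplied value overwrites the stored one. A compact sketch of that rule for a single property:

```rust
// Sketch of the per-property merge rule in update_pool: delete first,
// then an explicitly supplied value wins over whatever remains.
fn merge_property(
    stored: Option<String>,
    supplied: Option<String>,
    delete: bool,
) -> Option<String> {
    let base = if delete { None } else { stored };
    if supplied.is_some() { supplied } else { base }
}

fn main() {
    // Deleting and supplying in the same call: the supplied value still wins.
    assert_eq!(
        merge_property(Some("old".into()), Some("new".into()), true),
        Some("new".to_string())
    );
    // Delete alone clears the property.
    assert_eq!(merge_property(Some("old".into()), None, true), None);
}
```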
@@ -19,10 +19,7 @@ use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
     returns: {
         description: "The list of configured remotes (with config digest).",
         type: Array,
-        items: {
-            type: remote::Remote,
-            description: "Remote configuration (without password).",
-        },
+        items: { type: remote::Remote },
     },
     access: {
         description: "List configured remotes filtered by Remote.Audit privileges",
@@ -99,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let mut data = param.clone();
+    let mut data = param;
     data["password"] = Value::from(base64::encode(password.as_bytes()));
     let remote: remote::Remote = serde_json::from_value(data)?;

     let (mut config, _digest) = remote::config()?;

-    if let Some(_) = config.sections.get(&remote.name) {
+    if config.sections.get(&remote.name).is_some() {
         bail!("remote '{}' already exists.", remote.name);
     }

@@ -124,10 +121,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
             },
         },
     },
-    returns: {
-        description: "The remote configuration (with config digest).",
-        type: remote::Remote,
-    },
+    returns: { type: remote::Remote },
     access: {
         permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
     }
@@ -209,6 +203,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update remote configuration.
+#[allow(clippy::too_many_arguments)]
 pub fn update_remote(
     name: String,
     comment: Option<String>,
@@ -316,9 +311,7 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>

 /// Helper to get client for remote.cfg entry
 pub async fn remote_client(remote: remote::Remote) -> Result<HttpClient, Error> {
-    let options = HttpClientOptions::new()
-        .password(Some(remote.password.clone()))
-        .fingerprint(remote.fingerprint.clone());
+    let options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.fingerprint.clone());

     let client = HttpClient::new(
         &remote.host,
@@ -347,10 +340,7 @@ pub async fn remote_client(remote: remote::Remote) -> Result<HttpClient, Error>
     returns: {
         description: "List the accessible datastores.",
         type: Array,
-        items: {
-            description: "Datastore name and description.",
-            type: DataStoreListItem,
-        },
+        items: { type: DataStoreListItem },
     },
 )]
 /// List datastores of a remote.cfg entry
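The `remote_client` hunk replaces the builder chain with a dedicated `HttpClientOptions::new_non_interactive` constructor. Its signature is not shown in this diff; the following is a hedged sketch inferred purely from the call site, assuming the builder methods visible in the removed lines still exist:

```rust
// Hedged sketch, not the crate's actual definition: a convenience
// constructor as the call site implies, bundling the old builder chain
// for non-interactive (daemon/proxy) use where no prompting is possible.
fn new_non_interactive(
    password: String,
    fingerprint: Option<String>,
) -> HttpClientOptions {
    HttpClientOptions::new()
        .password(Some(password))
        .fingerprint(fingerprint)
}
```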
@@ -154,14 +154,14 @@ pub fn create_sync_job(

     let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+    let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
     if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
         bail!("permission check failed");
     }

     let (mut config, _digest) = sync::config()?;

-    if let Some(_) = config.sections.get(&sync_job.id) {
+    if config.sections.get(&sync_job.id).is_some() {
         bail!("job '{}' already exists.", sync_job.id);
     }

@@ -182,10 +182,7 @@ pub fn create_sync_job(
             },
         },
     },
-    returns: {
-        description: "The sync job configuration.",
-        type: sync::SyncJobConfig,
-    },
+    returns: { type: sync::SyncJobConfig },
     access: {
         description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
         permission: &Permission::Anybody,
@@ -282,6 +279,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update sync job config.
+#[allow(clippy::too_many_arguments)]
 pub fn update_sync_job(
     id: String,
     store: Option<String>,
@@ -517,7 +515,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator

     // unless they have Datastore.Modify as well
     job.store = "localstore3".to_string();
-    job.owner = Some(read_auth_id.clone());
+    job.owner = Some(read_auth_id);
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
     job.owner = None;
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
src/api2/config/tape_encryption_keys.rs (new file, 279 lines):

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        ApiMethod,
        Router,
        RpcEnvironment,
    },
    tools::fs::open_file_locked,
};

use crate::{
    config::{
        tape_encryption_keys::{
            TAPE_KEYS_LOCKFILE,
            load_keys,
            load_key_configs,
            save_keys,
            save_key_configs,
            insert_key,
        },
    },
    api2::types::{
        TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
        PROXMOX_CONFIG_DIGEST_SCHEMA,
        PASSWORD_HINT_SCHEMA,
        KeyInfo,
        Kdf,
    },
    backup::{
        KeyConfig,
        Fingerprint,
    },
};

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "The list of tape encryption keys (with config digest).",
        type: Array,
        items: { type: KeyInfo },
    },
)]
/// List existing keys
pub fn list_keys(
    _param: Value,
    _info: &ApiMethod,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<KeyInfo>, Error> {

    let (key_map, digest) = load_key_configs()?;

    let mut list = Vec::new();

    for (_fingerprint, item) in key_map.iter() {
        list.push(item.into());
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            fingerprint: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            },
            password: {
                description: "The current password.",
                min_length: 5,
            },
            "new-password": {
                description: "The new password.",
                min_length: 5,
            },
            hint: {
                schema: PASSWORD_HINT_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Change the encryption key's password (and password hint).
pub fn change_passphrase(
    kdf: Option<Kdf>,
    password: String,
    new_password: String,
    hint: String,
    fingerprint: Fingerprint,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment
) -> Result<(), Error> {

    let kdf = kdf.unwrap_or_default();

    if let Kdf::None = kdf {
        bail!("Please specify a key derivation funktion (none is not allowed here).");
    }

    let _lock = open_file_locked(
        TAPE_KEYS_LOCKFILE,
        std::time::Duration::new(10, 0),
        true,
    )?;

    let (mut config_map, expected_digest) = load_key_configs()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let key_config = match config_map.get(&fingerprint) {
        Some(key_config) => key_config,
        None => bail!("tape encryption key '{}' does not exist.", fingerprint),
    };

    let (key, created, fingerprint) = key_config.decrypt(&|| Ok(password.as_bytes().to_vec()))?;
    let mut new_key_config = KeyConfig::with_key(&key, new_password.as_bytes(), kdf)?;
    new_key_config.created = created; // keep original value
    new_key_config.hint = Some(hint);

    config_map.insert(fingerprint, new_key_config);

    save_key_configs(config_map)?;

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            kdf: {
                type: Kdf,
                optional: true,
            },
            password: {
                description: "A secret password.",
                min_length: 5,
            },
            hint: {
                schema: PASSWORD_HINT_SCHEMA,
            },
        },
    },
    returns: {
        schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
    },
)]
/// Create a new encryption key
pub fn create_key(
    kdf: Option<Kdf>,
    password: String,
    hint: String,
    _rpcenv: &mut dyn RpcEnvironment
) -> Result<Fingerprint, Error> {

    let kdf = kdf.unwrap_or_default();

    if let Kdf::None = kdf {
        bail!("Please specify a key derivation funktion (none is not allowed here).");
    }

    let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
    key_config.hint = Some(hint);

    let fingerprint = key_config.fingerprint.clone().unwrap();

    insert_key(key, key_config, false)?;

    Ok(fingerprint)
}


#[api(
    input: {
        properties: {
            fingerprint: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            },
        },
    },
    returns: {
        type: KeyInfo,
    },
)]
/// Get key config (public key part)
pub fn read_key(
    fingerprint: Fingerprint,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<KeyInfo, Error> {

    let (config_map, _digest) = load_key_configs()?;

    let key_config = match config_map.get(&fingerprint) {
        Some(key_config) => key_config,
        None => bail!("tape encryption key '{}' does not exist.", fingerprint),
    };

    if key_config.kdf.is_none() {
        bail!("found unencrypted key - internal error");
    }

    Ok(key_config.into())
}

#[api(
    protected: true,
    input: {
        properties: {
            fingerprint: {
                schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
)]
/// Remove a encryption key from the database
///
/// Please note that you can no longer access tapes using this key.
pub fn delete_key(
    fingerprint: Fingerprint,
    digest: Option<String>,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let _lock = open_file_locked(
        TAPE_KEYS_LOCKFILE,
        std::time::Duration::new(10, 0),
        true,
    )?;

    let (mut config_map, expected_digest) = load_key_configs()?;
    let (mut key_map, _) = load_keys()?;

    if let Some(ref digest) = digest {
        let digest = proxmox::tools::hex_to_digest(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config_map.get(&fingerprint) {
        Some(_) => { config_map.remove(&fingerprint); },
        None => bail!("tape encryption key '{}' does not exist.", fingerprint),
    }
    save_key_configs(config_map)?;

    key_map.remove(&fingerprint);
    save_keys(key_map)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_KEY)
    .put(&API_METHOD_CHANGE_PASSPHRASE)
    .delete(&API_METHOD_DELETE_KEY);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_KEYS)
    .post(&API_METHOD_CREATE_KEY)
    .match_all("fingerprint", &ITEM_ROUTER);
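Both `create_key` and `change_passphrase` default the KDF via `kdf.unwrap_or_default()` and then explicitly reject `Kdf::None`, so tape keys can never be stored without password hardening. A hedged sketch of the shape this implies for the `Kdf` type; the variant set and the default shown here are assumptions inferred from this file's usage, not the crate's definition:

```rust
// Hedged sketch of Kdf as this file uses it: Default must yield a real KDF,
// and None exists only so other call sites (e.g. key import) can express
// "no derivation". Variant names are assumptions for illustration.
#[derive(Default)]
pub enum Kdf {
    /// No key derivation - rejected by both handlers above.
    None,
    /// A password-hardening KDF as the assumed default.
    #[default]
    Scrypt,
}
```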
@@ -98,7 +98,7 @@ pub fn create_verification_job(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

-    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;

     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;

@@ -106,7 +106,7 @@ pub fn create_verification_job(

     let (mut config, _digest) = verify::config()?;

-    if let Some(_) = config.sections.get(&verification_job.id) {
+    if config.sections.get(&verification_job.id).is_some() {
         bail!("job '{}' already exists.", verification_job.id);
     }

@@ -127,10 +127,7 @@ pub fn create_verification_job(
             },
         },
     },
-    returns: {
-        description: "The verification job configuration.",
-        type: verify::VerificationJobConfig,
-    },
+    returns: { type: verify::VerificationJobConfig },
     access: {
         permission: &Permission::Anybody,
         description: "Requires Datastore.Audit or Datastore.Verify on job's datastore.",
@@ -218,6 +215,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update verification job config.
+#[allow(clippy::too_many_arguments)]
 pub fn update_verification_job(
     id: String,
     store: Option<String>,
@@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result<Response<Body>, E
     };

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+        .map_ok(|bytes| bytes.freeze());

     let body = Body::wrap_stream(payload);
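This hunk drops the explicit conversion because, with hyper and tokio-util now on the same `bytes` crate version, `BytesMut::freeze()` already yields the `Bytes` type hyper expects. A self-contained sketch of the streaming body as the hunk leaves it:

```rust
use futures::TryStreamExt;
use hyper::Body;
use tokio_util::codec::{BytesCodec, FramedRead};

// Sketch: stream a file as a hyper Body. FramedRead with BytesCodec yields
// Result<BytesMut, io::Error> items; freeze() turns each BytesMut into the
// Bytes that Body::wrap_stream accepts directly.
fn file_body(file: tokio::fs::File) -> Body {
    let payload = FramedRead::new(file, BytesCodec::new())
        .map_ok(|bytes| bytes.freeze());
    Body::wrap_stream(payload)
}
```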
@@ -1,3 +1,5 @@
+//! Server/Node Configuration and Administration
+
 use std::net::TcpListener;
 use std::os::unix::io::AsRawFd;

@@ -6,7 +8,7 @@ use futures::future::{FutureExt, TryFutureExt};
 use hyper::body::Body;
 use hyper::http::request::Parts;
 use hyper::upgrade::Upgraded;
-use nix::fcntl::{fcntl, FcntlArg, FdFlag};
+use hyper::Request;
 use serde_json::{json, Value};
 use tokio::io::{AsyncBufReadExt, BufReader};

@@ -93,11 +95,16 @@ async fn termproxy(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     // intentionally user only for now
-    let userid: Userid = rpcenv
+    let auth_id: Authid = rpcenv
         .get_auth_id()
-        .ok_or_else(|| format_err!("unknown user"))?
+        .ok_or_else(|| format_err!("no authid available"))?
         .parse()?;
-    let auth_id = Authid::from(userid.clone());
+
+    if auth_id.is_token() {
+        bail!("API tokens cannot access this API endpoint");
+    }
+
+    let userid = auth_id.user();

     if userid.realm() != "pam" {
         bail!("only pam users can use the console");
@@ -116,7 +123,7 @@ async fn termproxy(
     )?;

     let mut command = Vec::new();
-    match cmd.as_ref().map(|x| x.as_str()) {
+    match cmd.as_deref() {
         Some("login") | None => {
             command.push("login");
             if userid == "root@pam" {
@@ -145,18 +152,10 @@ async fn termproxy(
     move |worker| async move {
         // move inside the worker so that it survives and does not close the port
         // remove CLOEXEC from listenere so that we can reuse it in termproxy
-        let fd = listener.as_raw_fd();
-        let mut flags = match fcntl(fd, FcntlArg::F_GETFD) {
-            Ok(bits) => FdFlag::from_bits_truncate(bits),
-            Err(err) => bail!("could not get fd: {}", err),
-        };
-        flags.remove(FdFlag::FD_CLOEXEC);
-        if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
-            bail!("could not set fd: {}", err);
-        }
+        tools::fd_change_cloexec(listener.as_raw_fd(), false)?;

         let mut arguments: Vec<&str> = Vec::new();
-        let fd_string = fd.to_string();
+        let fd_string = listener.as_raw_fd().to_string();
         arguments.push(&fd_string);
         arguments.extend_from_slice(&[
             "--path",
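The inline fcntl block is factored out into `tools::fd_change_cloexec`. Its body is not shown in this diff; a plausible sketch, reconstructed from the removed lines and generalized over the on/off flag the new call site passes:

```rust
use anyhow::{bail, Error};
use nix::fcntl::{fcntl, FcntlArg, FdFlag};
use std::os::unix::io::RawFd;

// Sketch of what tools::fd_change_cloexec plausibly does: the same
// fcntl sequence the removed inline code performed, parameterized so the
// flag can be set as well as cleared.
pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
    let bits = match fcntl(fd, FcntlArg::F_GETFD) {
        Ok(bits) => bits,
        Err(err) => bail!("could not get fd flags: {}", err),
    };
    let mut flags = FdFlag::from_bits_truncate(bits);
    flags.set(FdFlag::FD_CLOEXEC, on);
    if let Err(err) = fcntl(fd, FcntlArg::F_SETFD(flags)) {
        bail!("could not set fd flags: {}", err);
    }
    Ok(())
}
```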
@@ -201,7 +200,7 @@ async fn termproxy(

     let mut needs_kill = false;
     let res = tokio::select!{
-        res = &mut child => {
+        res = child.wait() => {
             let exit_code = res?;
             if !exit_code.success() {
                 match exit_code.code() {
@@ -221,14 +220,13 @@ async fn termproxy(

     if needs_kill {
         if res.is_ok() {
-            child.kill()?;
-            child.await?;
+            child.kill().await?;
             return Ok(());
         }

-        if let Err(err) = child.kill() {
+        if let Err(err) = child.kill().await {
             worker.warn(format!("error killing termproxy: {}", err));
-        } else if let Err(err) = child.await {
+        } else if let Err(err) = child.wait().await {
             worker.warn(format!("error awaiting termproxy: {}", err));
         }
     }
@@ -276,7 +274,16 @@ fn upgrade_to_websocket(
 ) -> ApiResponseFuture {
     async move {
         // intentionally user only for now
-        let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
+        let auth_id: Authid = rpcenv
+            .get_auth_id()
+            .ok_or_else(|| format_err!("no authid available"))?
+            .parse()?;
+
+        if auth_id.is_token() {
+            bail!("API tokens cannot access this API endpoint");
+        }
+
+        let userid = auth_id.user();
         let ticket = tools::required_string_param(&param, "vncticket")?;
         let port: u16 = tools::required_integer_param(&param, "port")? as u16;

@@ -288,10 +295,10 @@ fn upgrade_to_websocket(
             Some(&ticket::term_aad(&userid, "/system", port)),
         )?;

-        let (ws, response) = WebSocket::new(parts.headers)?;
+        let (ws, response) = WebSocket::new(parts.headers.clone())?;

         crate::server::spawn_internal_task(async move {
-            let conn: Upgraded = match req_body.on_upgrade().map_err(Error::from).await {
+            let conn: Upgraded = match hyper::upgrade::on(Request::from_parts(parts, req_body)).map_err(Error::from).await {
                 Ok(upgraded) => upgraded,
                 _ => bail!("error"),
             };
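The `child` hunks adapt to tokio 1.0, where `tokio::process::Child` no longer implements `Future` and `kill()` became async (it also reaps the process, which is why the old follow-up `child.await?` disappears). A minimal self-contained sketch of both changes; the `sleep` command and timeout are illustrative only:

```rust
use anyhow::Error;
use tokio::process::Command;

// Sketch of the tokio 1.0 child-process API this code migrates to.
async fn run_with_timeout() -> Result<(), Error> {
    let mut child = Command::new("sleep").arg("10").spawn()?;

    tokio::select! {
        // tokio 0.2: `res = &mut child` awaited the child directly.
        // tokio 1.0: await the explicit wait() future instead.
        res = child.wait() => {
            let status = res?;
            println!("child exited: {}", status);
        }
        _ = tokio::time::sleep(std::time::Duration::from_secs(1)) => {
            // kill() is now async and reaps the process itself, so no
            // separate `child.await` is needed afterwards.
            child.kill().await?;
        }
    }
    Ok(())
}
```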
@@ -35,18 +35,15 @@ use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
 /// List available APT updates
 fn apt_update_available(_param: Value) -> Result<Value, Error> {

-    match apt::pkg_cache_expired() {
-        Ok(false) => {
-            if let Ok(Some(cache)) = apt::read_pkg_state() {
-                return Ok(json!(cache.package_status));
-            }
-        },
-        _ => (),
+    if let Ok(false) = apt::pkg_cache_expired() {
+        if let Ok(Some(cache)) = apt::read_pkg_state() {
+            return Ok(json!(cache.package_status));
+        }
     }

     let cache = apt::update_cache()?;

-    return Ok(json!(cache.package_status));
+    Ok(json!(cache.package_status))
 }

 fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
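As a side note on the rewrite above: a `match` whose only interesting arm is one pattern collapses into `if let` with identical behaviour. A tiny sketch (hypothetical `cache_expired` stand-in):

```rust
fn cache_expired() -> Result<bool, ()> {
    Ok(false)
}

fn main() {
    // old style: match with a single interesting arm
    match cache_expired() {
        Ok(false) => println!("cache still valid"),
        _ => (),
    }
    // new style, same behaviour
    if let Ok(false) = cache_expired() {
        println!("cache still valid");
    }
}
```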
@@ -90,8 +87,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
             type: bool,
             description: r#"Send notification mail about new package updates available to the
                 email address configured for 'root@pam'."#,
-            optional: true,
             default: false,
+            optional: true,
         },
         quiet: {
             description: "Only produces output suitable for logging, omitting progress indicators.",
@@ -110,16 +107,13 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
 )]
 /// Update the APT database
 pub fn apt_update_database(
-    notify: Option<bool>,
-    quiet: Option<bool>,
+    notify: bool,
+    quiet: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
-    // FIXME: change to non-option in signature and drop below once we have proxmox-api-macro 0.2.3
-    let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
-    let notify = notify.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_NOTIFY);
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
         do_apt_update(&worker, quiet)?;
@@ -196,7 +190,7 @@ fn apt_get_changelog(
         }
     }, Some(&name));

-    if pkg_info.len() == 0 {
+    if pkg_info.is_empty() {
         bail!("Package '{}' not found", name);
     }

@@ -205,7 +199,7 @@ fn apt_get_changelog(
     if changelog_url.starts_with("http://download.proxmox.com/") {
         let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, None))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-        return Ok(json!(changelog));
+        Ok(json!(changelog))

     } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
         let sub = match subscription::read_subscription()? {
@@ -229,7 +223,7 @@ fn apt_get_changelog(

         let changelog = crate::tools::runtime::block_on(http::get_string(changelog_url, Some(&auth_header)))
             .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
-        return Ok(json!(changelog));
+        Ok(json!(changelog))

     } else {
         let mut command = std::process::Command::new("apt-get");
@@ -237,7 +231,7 @@ fn apt_get_changelog(
         command.arg("-qq"); // don't display download progress
         command.arg(name);
         let output = crate::tools::run_command(command, None)?;
-        return Ok(json!(output));
+        Ok(json!(output))
     }
 }

@@ -138,7 +138,7 @@ pub fn initialize_disk(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -132,7 +132,7 @@ pub fn create_datastore_disk(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

|
|||||||
|
|
||||||
let manager = DiskManage::new();
|
let manager = DiskManage::new();
|
||||||
|
|
||||||
let disk = manager.clone().disk_by_name(&disk)?;
|
let disk = manager.disk_by_name(&disk)?;
|
||||||
|
|
||||||
let partition = create_single_linux_partition(&disk)?;
|
let partition = create_single_linux_partition(&disk)?;
|
||||||
create_file_system(&partition, filesystem)?;
|
create_file_system(&partition, filesystem)?;
|
||||||
@ -212,8 +212,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
|
|||||||
let (config, _) = crate::config::datastore::config()?;
|
let (config, _) = crate::config::datastore::config()?;
|
||||||
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
||||||
let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
|
let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
|
||||||
.filter(|ds| ds.path == path)
|
.find(|ds| ds.path == path);
|
||||||
.next();
|
|
||||||
|
|
||||||
if let Some(conflicting_datastore) = conflicting_datastore {
|
if let Some(conflicting_datastore) = conflicting_datastore {
|
||||||
bail!("Can't remove '{}' since it's required by datastore '{}'",
|
bail!("Can't remove '{}' since it's required by datastore '{}'",
|
||||||
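The `.find()` change above is the standard iterator idiom: `Iterator::find` is equivalent to `.filter(..).next()` but stops at the first match and reads as a single operation. A small sketch with made-up data:

```rust
fn main() {
    let paths = ["/mnt/a", "/mnt/b", "/mnt/c"];
    let via_filter = paths.iter().filter(|p| **p == "/mnt/b").next();
    let via_find = paths.iter().find(|p| **p == "/mnt/b");
    assert_eq!(via_filter, via_find);
    println!("found: {:?}", via_find);
}
```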
@@ -254,7 +254,7 @@ pub fn create_zpool(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {

-    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -125,7 +125,7 @@ pub fn update_dns(
 ) -> Result<Value, Error> {

     lazy_static! {
-        static ref MUTEX: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+        static ref MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
     }

     let _guard = MUTEX.lock();
@@ -102,10 +102,7 @@ pub fn list_network_devices(
             },
         },
     },
-    returns: {
-        description: "The network interface configuration (with config digest).",
-        type: Interface,
-    },
+    returns: { type: Interface },
     access: {
         permission: &Permission::Privilege(&["system", "network", "interfaces", "{name}"], PRIV_SYS_AUDIT, false),
     },
@@ -135,7 +132,6 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
                 schema: NETWORK_INTERFACE_NAME_SCHEMA,
             },
             "type": {
-                description: "Interface type.",
                 type: NetworkInterfaceType,
                 optional: true,
             },
@@ -217,6 +213,7 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
     },
 )]
 /// Create network interface configuration.
+#[allow(clippy::too_many_arguments)]
 pub fn create_interface(
     iface: String,
     autostart: Option<bool>,
|
|||||||
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||||
},
|
},
|
||||||
"type": {
|
"type": {
|
||||||
description: "Interface type. If specified, need to match the current type.",
|
|
||||||
type: NetworkInterfaceType,
|
type: NetworkInterfaceType,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
@@ -482,6 +478,7 @@ pub enum DeletableProperty {
     },
 )]
 /// Update network interface config.
+#[allow(clippy::too_many_arguments)]
 pub fn update_interface(
     iface: String,
     autostart: Option<bool>,
@@ -73,10 +73,7 @@ pub fn check_subscription(
             },
         },
     },
-    returns: {
-        description: "Subscription status.",
-        type: SubscriptionInfo,
-    },
+    returns: { type: SubscriptionInfo },
     access: {
         permission: &Permission::Anybody,
     },
@@ -140,7 +137,7 @@ pub fn set_subscription(

     let server_id = tools::get_hardware_address()?;

-    let info = subscription::check_subscription(key, server_id.to_owned())?;
+    let info = subscription::check_subscription(key, server_id)?;

     subscription::write_subscription(info)
         .map_err(|e| format_err!("Error writing subscription status - {}", e))?;
@@ -110,16 +110,12 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
     } else {
         let user_info = CachedUserInfo::new()?;

-        let task_privs = user_info.lookup_privs(auth_id, &["system", "tasks"]);
-        if task_privs & PRIV_SYS_AUDIT != 0 {
-            // allowed to read all tasks in general
-            Ok(())
-        } else if check_job_privs(&auth_id, &user_info, upid).is_ok() {
-            // job which the user/token could have configured/manually executed
-            Ok(())
-        } else {
-            bail!("task access not allowed");
-        }
+        // access to all tasks
+        // or task == job which the user/token could have configured/manually executed
+        user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
+            .or_else(|_| check_job_privs(&auth_id, &user_info, upid))
+            .or_else(|_| bail!("task access not allowed"))
     }
 }

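The rewritten check chains fallible checks with `Result::or_else`: the first check that returns `Ok` short-circuits the rest. A sketch with hypothetical stand-ins for the real privilege helpers:

```rust
fn check_audit_priv() -> Result<(), String> {
    Err("no PRIV_SYS_AUDIT".to_string()) // hypothetical: audit privilege missing
}

fn check_job_priv() -> Result<(), String> {
    Ok(()) // hypothetical: the task maps to a job this user configured
}

fn check_task_access() -> Result<(), String> {
    check_audit_priv()
        .or_else(|_| check_job_priv())
        .or_else(|_| Err("task access not allowed".to_string()))
}

fn main() {
    assert!(check_task_access().is_ok());
}
```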
@@ -166,7 +162,6 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
         },
         user: {
             type: Userid,
-            description: "The user who started the task.",
         },
         tokenid: {
             type: Tokenname,
@@ -430,6 +425,7 @@ fn stop_task(
     },
 )]
 /// List tasks.
+#[allow(clippy::too_many_arguments)]
 pub fn list_tasks(
     start: u64,
     limit: u64,
@@ -514,7 +510,7 @@ pub fn list_tasks(
         .collect();

     let mut count = result.len() + start as usize;
-    if result.len() > 0 && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
+    if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
         count += 1;
     }

@@ -1,3 +1,5 @@
+//! Cheap check if the API daemon is online.
+
 use anyhow::{Error};
 use serde_json::{json, Value};

@@ -20,7 +22,7 @@ use proxmox::api::{api, Router, Permission};
     }
 )]
 /// Dummy method which replies with `{ "pong": True }`
-fn ping() -> Result<Value, Error> {
+pub fn ping() -> Result<Value, Error> {
     Ok(json!({
         "pong": true,
     }))
@@ -88,7 +88,7 @@ pub fn do_sync_job(
     let worker_future = async move {

         let delete = sync_job.remove_vanished.unwrap_or(true);
-        let sync_owner = sync_job.owner.unwrap_or(Authid::root_auth_id().clone());
+        let sync_owner = sync_job.owner.unwrap_or_else(|| Authid::root_auth_id().clone());
         let (client, src_repo, tgt_store) = get_pull_parameters(&sync_job.store, &sync_job.remote, &sync_job.remote_store).await?;

         worker.log(format!("Starting datastore sync job '{}'", job_id));
@@ -1,8 +1,10 @@
+//! Backup reader/restore protocol (HTTP2 upgrade)
+
 use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::header::{self, HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
-use hyper::{Body, Response, StatusCode};
+use hyper::{Body, Response, Request, StatusCode};
 use serde_json::Value;

 use proxmox::{sortable, identity};
|
|||||||
|
|
||||||
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
|
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
|
||||||
|
|
||||||
WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
|
WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| async move {
|
||||||
|
let _guard = _guard;
|
||||||
|
|
||||||
let mut env = ReaderEnvironment::new(
|
let mut env = ReaderEnvironment::new(
|
||||||
env_type,
|
env_type,
|
||||||
auth_id,
|
auth_id,
|
||||||
@ -128,15 +132,13 @@ fn upgrade_to_backup_reader_protocol(
|
|||||||
|
|
||||||
let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
|
let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
|
||||||
|
|
||||||
let abort_future = worker.abort_future();
|
let mut abort_future = worker.abort_future()
|
||||||
|
.map(|_| Err(format_err!("task aborted")));
|
||||||
|
|
||||||
let req_fut = req_body
|
let env2 = env.clone();
|
||||||
.on_upgrade()
|
let req_fut = async move {
|
||||||
.map_err(Error::from)
|
let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
|
||||||
.and_then({
|
env2.debug("protocol upgrade done");
|
||||||
let env = env.clone();
|
|
||||||
move |conn| {
|
|
||||||
env.debug("protocol upgrade done");
|
|
||||||
|
|
||||||
let mut http = hyper::server::conn::Http::new();
|
let mut http = hyper::server::conn::Http::new();
|
||||||
http.http2_only(true);
|
http.http2_only(true);
|
||||||
@ -147,24 +149,17 @@ fn upgrade_to_backup_reader_protocol(
|
|||||||
http.http2_max_frame_size(4*1024*1024);
|
http.http2_max_frame_size(4*1024*1024);
|
||||||
|
|
||||||
http.serve_connection(conn, service)
|
http.serve_connection(conn, service)
|
||||||
.map_err(Error::from)
|
.map_err(Error::from).await
|
||||||
}
|
};
|
||||||
});
|
|
||||||
let abort_future = abort_future
|
|
||||||
.map(|_| Err(format_err!("task aborted")));
|
|
||||||
|
|
||||||
use futures::future::Either;
|
futures::select!{
|
||||||
futures::future::select(req_fut, abort_future)
|
req = req_fut.fuse() => req?,
|
||||||
.map(move |res| {
|
abort = abort_future => abort?,
|
||||||
let _guard = _guard;
|
};
|
||||||
match res {
|
|
||||||
Either::Left((Ok(res), _)) => Ok(res),
|
env.log("reader finished successfully");
|
||||||
Either::Left((Err(err), _)) => Err(err),
|
|
||||||
Either::Right((Ok(res), _)) => Ok(res),
|
Ok(())
|
||||||
Either::Right((Err(err), _)) => Err(err),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.map_ok(move |_| env.log("reader finished successfully"))
|
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let response = Response::builder()
|
let response = Response::builder()
|
||||||
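A self-contained sketch of the `futures::select!` pattern the rewrite adopts (only the `futures` crate assumed): `select!` requires its operands to be fused and pinned, which is why the plain request future gets an explicit `.fuse()` in the diff.

```rust
use futures::{executor::block_on, future, pin_mut, select, FutureExt};

fn main() {
    block_on(async {
        let req_fut = async { 42u32 }.fuse();
        let abort_future = future::pending::<u32>().fuse();
        pin_mut!(req_fut, abort_future);

        select! {
            v = req_fut => println!("request future finished: {}", v),
            _ = abort_future => println!("task aborted"),
        }
    });
}
```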
@@ -1,3 +1,5 @@
+//! Datastore status
+
 use proxmox::list_subdirs_api_method;

 use anyhow::{Error};
@@ -75,7 +77,7 @@ use crate::config::acl::{
     },
 )]
 /// List Datastore usages and estimates
-fn datastore_status(
+pub fn datastore_status(
     _param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -127,8 +129,7 @@ fn datastore_status(
             rrd_mode,
         );

-        match (total_res, used_res) {
-            (Some((start, reso, total_list)), Some((_, _, used_list))) => {
+        if let (Some((start, reso, total_list)), Some((_, _, used_list))) = (total_res, used_res) {
             let mut usage_list: Vec<f64> = Vec::new();
             let mut time_list: Vec<u64> = Vec::new();
             let mut history = Vec::new();
@@ -168,8 +169,6 @@ fn datastore_status(
                 }
             }
         }
-            },
-            _ => {},
-        }

         list.push(entry);
src/api2/tape/backup.rs (new file, 254 lines)
@@ -0,0 +1,254 @@
use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    task_log,
    config::{
        self,
        drive::check_drive_exists,
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    task::TaskState,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::media_changer,
        changer::update_changer_online_status,
    },
};

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    drive: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(store),
        auth_id,
        to_stdout,
        move |worker| {
            backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);


fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    drive: &str,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    task_log!(worker, "update media online status");
    let has_changer = update_media_online_status(drive)?;

    let use_offline_media = !has_changer;

    let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;

    let mut pool_writer = PoolWriter::new(pool, drive)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            task_log!(worker, "backup snapshot {}", info.backup_dir);
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
        pool_writer.eject_media(worker)?;
    }

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<bool, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut has_changer = false;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        has_changer = true;

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;
    }

    Ok(has_changer)
}

pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
}
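The chunk loop in `backup_snapshot` above peeks before loading writable media, so a new volume is only requested while chunks remain. A loose analogy (hypothetical `drain_in_batches`, plain integers instead of chunks):

```rust
fn drain_in_batches<I: Iterator<Item = u32>>(iter: I, batch: usize) {
    let mut iter = iter.peekable();
    loop {
        // stop before "loading a volume" if nothing is left
        if iter.peek().is_none() {
            break;
        }
        // take up to `batch` items, like filling one tape volume
        let chunk: Vec<u32> = iter.by_ref().take(batch).collect();
        println!("wrote {} chunks to current volume", chunk.len());
    }
}

fn main() {
    drain_in_batches(0..10u32, 4);
}
```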
src/api2/tape/changer.rs (new file, 191 lines)
@@ -0,0 +1,191 @@
use std::path::Path;

use anyhow::Error;
use serde_json::Value;

use proxmox::api::{api, Router, SubdirMap};
use proxmox::list_subdirs_api_method;

use crate::{
    config,
    api2::types::{
        CHANGER_NAME_SCHEMA,
        ChangerListEntry,
        MtxEntryKind,
        MtxStatusEntry,
        ScsiTapeChanger,
    },
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        linux_tape_changer_list,
        changer::{
            OnlineStatusMap,
            ElementStatus,
            ScsiMediaChange,
            mtx_status_to_online_set,
        },
        lookup_device_identification,
    },
};


#[api(
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
        },
    },
    returns: {
        description: "A status entry for each drive and slot.",
        type: Array,
        items: {
            type: MtxStatusEntry,
        },
    },
)]
/// Get tape changer status
pub async fn get_status(name: String) -> Result<Vec<MtxStatusEntry>, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;

    let status = tokio::task::spawn_blocking(move || {
        changer_config.status()
    }).await??;

    let state_path = Path::new(TAPE_STATUS_DIR);
    let mut inventory = Inventory::load(state_path)?;

    let mut map = OnlineStatusMap::new(&config)?;
    let online_set = mtx_status_to_online_set(&status, &inventory);
    map.update_online_status(&name, online_set)?;

    inventory.update_online_status(&map)?;

    let mut list = Vec::new();

    for (id, drive_status) in status.drives.iter().enumerate() {
        let entry = MtxStatusEntry {
            entry_kind: MtxEntryKind::Drive,
            entry_id: id as u64,
            label_text: match &drive_status.status {
                ElementStatus::Empty => None,
                ElementStatus::Full => Some(String::new()),
                ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
            },
            loaded_slot: drive_status.loaded_slot,
        };
        list.push(entry);
    }

    for (id, slot_info) in status.slots.iter().enumerate() {
        let entry = MtxStatusEntry {
            entry_kind: if slot_info.import_export {
                MtxEntryKind::ImportExport
            } else {
                MtxEntryKind::Slot
            },
            entry_id: id as u64 + 1,
            label_text: match &slot_info.status {
                ElementStatus::Empty => None,
                ElementStatus::Full => Some(String::new()),
                ElementStatus::VolumeTag(tag) => Some(tag.to_string()),
            },
            loaded_slot: None,
        };
        list.push(entry);
    }

    Ok(list)
}

#[api(
    input: {
        properties: {
            name: {
                schema: CHANGER_NAME_SCHEMA,
            },
            from: {
                description: "Source slot number",
                minimum: 1,
            },
            to: {
                description: "Destination slot number",
                minimum: 1,
            },
        },
    },
)]
/// Transfers media from one slot to another
pub async fn transfer(
    name: String,
    from: u64,
    to: u64,
) -> Result<(), Error> {

    let (config, _digest) = config::drive::config()?;

    let mut changer_config: ScsiTapeChanger = config.lookup("changer", &name)?;

    tokio::task::spawn_blocking(move || {
        changer_config.transfer(from, to)
    }).await?
}

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "The list of configured changers with model information.",
        type: Array,
        items: {
            type: ChangerListEntry,
        },
    },
)]
/// List changers
pub fn list_changers(
    _param: Value,
) -> Result<Vec<ChangerListEntry>, Error> {

    let (config, _digest) = config::drive::config()?;

    let linux_changers = linux_tape_changer_list();

    let changer_list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;

    let mut list = Vec::new();

    for changer in changer_list {
        let info = lookup_device_identification(&linux_changers, &changer.path);
        let entry = ChangerListEntry { config: changer, info };
        list.push(entry);
    }
    Ok(list)
}

const SUBDIRS: SubdirMap = &[
    (
        "status",
        &Router::new()
            .get(&API_METHOD_GET_STATUS)
    ),
    (
        "transfer",
        &Router::new()
            .post(&API_METHOD_TRANSFER)
    ),
];

const ITEM_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(&SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_CHANGERS)
    .match_all("name", &ITEM_ROUTER);
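`get_status` and `transfer` above wrap the blocking SCSI calls in `tokio::task::spawn_blocking`; the doubled `?` after `.await` unwraps the `JoinError` first and the operation's own `Result` second. A minimal sketch (the blocking closure is a stand-in):

```rust
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let status = tokio::task::spawn_blocking(|| -> Result<String, std::io::Error> {
        // stand-in for a blocking device query
        Ok("drive 0: empty".to_string())
    })
    .await??; // outer ? = JoinError, inner ? = the operation itself
    println!("{}", status);
    Ok(())
}
```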
src/api2/tape/drive.rs (new file, 1235 lines)
File diff suppressed because it is too large.
src/api2/tape/media.rs (new file, 350 lines)
@@ -0,0 +1,350 @@
use std::path::Path;

use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};

use proxmox::{
    api::{api, Router, SubdirMap},
    list_subdirs_api_method,
    tools::Uuid,
};

use crate::{
    config::{
        self,
    },
    api2::types::{
        BACKUP_ID_SCHEMA,
        BACKUP_TYPE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        MEDIA_LABEL_SCHEMA,
        MEDIA_UUID_SCHEMA,
        MEDIA_SET_UUID_SCHEMA,
        MediaPoolConfig,
        MediaListEntry,
        MediaStatus,
        MediaContentEntry,
    },
    backup::{
        BackupDir,
    },
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        MediaPool,
        MediaCatalog,
        changer::update_online_status,
    },
};

#[api(
    input: {
        properties: {
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        description: "List of registered backup media.",
        type: Array,
        items: {
            type: MediaListEntry,
        },
    },
)]
/// List pool media
pub async fn list_media(pool: Option<String>) -> Result<Vec<MediaListEntry>, Error> {

    let (config, _digest) = config::media_pool::config()?;

    let status_path = Path::new(TAPE_STATUS_DIR);

    let catalogs = tokio::task::spawn_blocking(move || {
        // update online media status
        if let Err(err) = update_online_status(status_path) {
            eprintln!("{}", err);
            eprintln!("update online media status failed - using old state");
        }
        // test what catalog files we have
        MediaCatalog::media_with_catalogs(status_path)
    }).await??;

    let mut list = Vec::new();

    for (_section_type, data) in config.sections.values() {
        let pool_name = match data["name"].as_str() {
            None => continue,
            Some(name) => name,
        };
        if let Some(ref name) = pool {
            if name != pool_name {
                continue;
            }
        }

        let config: MediaPoolConfig = config.lookup("pool", pool_name)?;

        let use_offline_media = true; // does not matter here
        let pool = MediaPool::with_config(status_path, &config, use_offline_media)?;

        let current_time = proxmox::tools::time::epoch_i64();

        for media in pool.list_media() {
            let expired = pool.media_is_expired(&media, current_time);

            let media_set_uuid = media.media_set_label()
                .map(|set| set.uuid.clone());

            let seq_nr = media.media_set_label()
                .map(|set| set.seq_nr);

            let media_set_name = media.media_set_label()
                .map(|set| {
                    pool.generate_media_set_name(&set.uuid, config.template.clone())
                        .unwrap_or_else(|_| set.uuid.to_string())
                });

            let catalog_ok = if media.media_set_label().is_none() {
                // Media is empty, we need no catalog
                true
            } else {
                catalogs.contains(media.uuid())
            };

            list.push(MediaListEntry {
                uuid: media.uuid().clone(),
                label_text: media.label_text().to_string(),
                ctime: media.ctime(),
                pool: Some(pool_name.to_string()),
                location: media.location().clone(),
                status: *media.status(),
                catalog: catalog_ok,
                expired,
                media_set_ctime: media.media_set_label().map(|set| set.ctime),
                media_set_uuid,
                media_set_name,
                seq_nr,
            });
        }
    }

    if pool.is_none() {

        let inventory = Inventory::load(status_path)?;

        for media_id in inventory.list_unassigned_media() {

            let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);

            if status == MediaStatus::Unknown {
                status = MediaStatus::Writable;
            }

            list.push(MediaListEntry {
                uuid: media_id.label.uuid.clone(),
                ctime: media_id.label.ctime,
                label_text: media_id.label.label_text.to_string(),
                location,
                status,
                catalog: true, // empty, so we do not need a catalog
                expired: false,
                media_set_uuid: None,
                media_set_name: None,
                media_set_ctime: None,
                seq_nr: None,
                pool: None,
            });
        }
    }

    Ok(list)
}

#[api(
    input: {
        properties: {
            "label-text": {
                schema: MEDIA_LABEL_SCHEMA,
            },
            force: {
                description: "Force removal (even if media is used in a media set).",
                type: bool,
                optional: true,
            },
        },
    },
)]
/// Destroy media (completely remove from database)
pub fn destroy_media(label_text: String, force: Option<bool>,) -> Result<(), Error> {

    let force = force.unwrap_or(false);

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut inventory = Inventory::load(status_path)?;

    let media_id = inventory.find_media_by_label_text(&label_text)
        .ok_or_else(|| format_err!("no such media '{}'", label_text))?;

    if !force {
        if let Some(ref set) = media_id.media_set_label {
            let is_empty = set.uuid.as_ref() == [0u8;16];
            if !is_empty {
                bail!("media '{}' contains data (please use 'force' flag to remove).", label_text);
            }
        }
    }

    let uuid = media_id.label.uuid.clone();

    inventory.remove_media(&uuid)?;

    Ok(())
}

#[api(
    properties: {
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
            optional: true,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
        "media": {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
        "media-set": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
        "backup-type": {
            schema: BACKUP_TYPE_SCHEMA,
            optional: true,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
    pub pool: Option<String>,
    pub label_text: Option<String>,
    pub media: Option<Uuid>,
    pub media_set: Option<Uuid>,
    pub backup_type: Option<String>,
    pub backup_id: Option<String>,
}

#[api(
    input: {
        properties: {
            "filter": {
                type: MediaContentListFilter,
                flatten: true,
            },
        },
    },
    returns: {
        description: "Media content list.",
        type: Array,
        items: {
            type: MediaContentEntry,
        },
    },
)]
/// List media content
pub fn list_content(
    filter: MediaContentListFilter,
) -> Result<Vec<MediaContentEntry>, Error> {

    let (config, _digest) = config::media_pool::config()?;

    let status_path = Path::new(TAPE_STATUS_DIR);
    let inventory = Inventory::load(status_path)?;

    let mut list = Vec::new();

    for media_id in inventory.list_used_media() {
        let set = media_id.media_set_label.as_ref().unwrap();

        if let Some(ref label_text) = filter.label_text {
            if &media_id.label.label_text != label_text { continue; }
        }

        if let Some(ref pool) = filter.pool {
            if &set.pool != pool { continue; }
        }

        if let Some(ref media_uuid) = filter.media {
            if &media_id.label.uuid != media_uuid { continue; }
        }

        if let Some(ref media_set_uuid) = filter.media_set {
            if &set.uuid != media_set_uuid { continue; }
        }

        let config: MediaPoolConfig = config.lookup("pool", &set.pool)?;

        let media_set_name = inventory
            .generate_media_set_name(&set.uuid, config.template.clone())
            .unwrap_or_else(|_| set.uuid.to_string());

        let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;

        for snapshot in catalog.snapshot_index().keys() {
            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some(ref backup_type) = filter.backup_type {
                if backup_dir.group().backup_type() != backup_type { continue; }
            }
            if let Some(ref backup_id) = filter.backup_id {
                if backup_dir.group().backup_id() != backup_id { continue; }
            }

            list.push(MediaContentEntry {
                uuid: media_id.label.uuid.clone(),
                label_text: media_id.label.label_text.to_string(),
                pool: set.pool.clone(),
                media_set_name: media_set_name.clone(),
                media_set_uuid: set.uuid.clone(),
                media_set_ctime: set.ctime,
                seq_nr: set.seq_nr,
                snapshot: snapshot.to_owned(),
                backup_time: backup_dir.backup_time(),
            });
        }
    }

    Ok(list)
}

const SUBDIRS: SubdirMap = &[
    (
        "content",
        &Router::new()
            .get(&API_METHOD_LIST_CONTENT)
    ),
    (
        "destroy",
        &Router::new()
            .get(&API_METHOD_DESTROY_MEDIA)
    ),
    (
        "list",
        &Router::new()
            .get(&API_METHOD_LIST_MEDIA)
    ),
];


pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
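`MediaContentListFilter` above relies on `#[serde(rename_all = "kebab-case")]` to map API parameter names like `label-text` onto Rust field names. A plain-serde sketch (the `#[api]` schema is omitted):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
struct Filter {
    label_text: Option<String>,
    backup_type: Option<String>,
}

fn main() {
    let f: Filter = serde_json::from_str(r#"{"label-text":"tape01"}"#).unwrap();
    assert_eq!(f.label_text.as_deref(), Some("tape01"));
    assert_eq!(f.backup_type, None);
    println!("{:?}", f);
}
```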
src/api2/tape/mod.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
//! Tape Backup Management

use anyhow::Error;
use serde_json::Value;

use proxmox::{
    api::{
        api,
        router::SubdirMap,
        Router,
    },
    list_subdirs_api_method,
};

use crate::{
    api2::types::TapeDeviceInfo,
    tape::{
        linux_tape_device_list,
        linux_tape_changer_list,
    },
};

pub mod drive;
pub mod changer;
pub mod media;
pub mod backup;
pub mod restore;

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "The list of autodetected tape drives.",
        type: Array,
        items: {
            type: TapeDeviceInfo,
        },
    },
)]
/// Scan tape drives
pub fn scan_drives(_param: Value) -> Result<Vec<TapeDeviceInfo>, Error> {

    let list = linux_tape_device_list();

    Ok(list)
}

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "The list of autodetected tape changers.",
        type: Array,
        items: {
            type: TapeDeviceInfo,
        },
    },
)]
/// Scan for SCSI tape changers
pub fn scan_changers(_param: Value) -> Result<Vec<TapeDeviceInfo>, Error> {

    let list = linux_tape_changer_list();

    Ok(list)
}

const SUBDIRS: SubdirMap = &[
    ("backup", &backup::ROUTER),
    ("changer", &changer::ROUTER),
    ("drive", &drive::ROUTER),
    ("media", &media::ROUTER),
    ("restore", &restore::ROUTER),
    (
        "scan-changers",
        &Router::new()
            .get(&API_METHOD_SCAN_CHANGERS),
    ),
    (
        "scan-drives",
        &Router::new()
            .get(&API_METHOD_SCAN_DRIVES),
    ),
];

pub const ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(SUBDIRS))
    .subdirs(SUBDIRS);
src/api2/tape/restore.rs (new file, 556 lines)
@@ -0,0 +1,556 @@
|
use std::path::Path;
|
||||||
|
use std::ffi::OsStr;
|
||||||
|
use std::convert::TryFrom;
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::{
|
||||||
|
api::{
|
||||||
|
api,
|
||||||
|
RpcEnvironment,
|
||||||
|
RpcEnvironmentType,
|
||||||
|
Router,
|
||||||
|
section_config::SectionConfigData,
|
||||||
|
},
|
||||||
|
tools::{
|
||||||
|
Uuid,
|
||||||
|
io::ReadExt,
|
||||||
|
fs::{
|
||||||
|
replace_file,
|
||||||
|
CreateOptions,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
tools::compute_file_csum,
|
||||||
|
api2::types::{
|
||||||
|
DATASTORE_SCHEMA,
|
||||||
|
DRIVE_NAME_SCHEMA,
|
||||||
|
UPID_SCHEMA,
|
||||||
|
Authid,
|
||||||
|
MediaPoolConfig,
|
||||||
|
},
|
||||||
|
config::{
|
||||||
|
self,
|
||||||
|
drive::check_drive_exists,
|
||||||
|
},
|
||||||
|
backup::{
|
||||||
|
archive_type,
|
||||||
|
MANIFEST_BLOB_NAME,
|
||||||
|
CryptMode,
|
||||||
|
DataStore,
|
||||||
|
BackupDir,
|
||||||
|
DataBlob,
|
||||||
|
BackupManifest,
|
||||||
|
ArchiveType,
|
||||||
|
IndexFile,
|
||||||
|
DynamicIndexReader,
|
||||||
|
FixedIndexReader,
|
||||||
|
},
|
||||||
|
server::WorkerTask,
|
||||||
|
tape::{
|
||||||
|
TAPE_STATUS_DIR,
|
||||||
|
TapeRead,
|
||||||
|
MediaId,
|
||||||
|
MediaCatalog,
|
||||||
|
ChunkArchiveDecoder,
|
||||||
|
MediaPool,
|
||||||
|
Inventory,
|
||||||
|
file_formats::{
|
||||||
|
PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
|
||||||
|
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
|
||||||
|
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
|
||||||
|
PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
|
||||||
|
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
|
||||||
|
MediaContentHeader,
|
||||||
|
},
|
||||||
|
drive::{
|
||||||
|
TapeDriver,
|
||||||
|
request_and_load_media,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.post(&API_METHOD_RESTORE);
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
drive: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"media-set": {
|
||||||
|
description: "Media set UUID.",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
schema: UPID_SCHEMA,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Restore data from media-set
|
||||||
|
pub fn restore(
|
||||||
|
store: String,
|
||||||
|
drive: String,
|
||||||
|
media_set: String,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let datastore = DataStore::lookup_datastore(&store)?;
|
||||||
|
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
|
let inventory = Inventory::load(status_path)?;
|
||||||
|
|
||||||
|
let media_set_uuid = media_set.parse()?;
|
||||||
|
|
||||||
|
let pool = inventory.lookup_media_set_pool(&media_set_uuid)?;
|
||||||
|
|
||||||
|
// check if pool exists
|
||||||
|
let (config, _digest) = config::media_pool::config()?;
|
||||||
|
let _pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
|
||||||
|
|
||||||
|
let (drive_config, _digest) = config::drive::config()?;
|
||||||
|
// early check before starting worker
|
||||||
|
check_drive_exists(&drive_config, &drive)?;
|
||||||
|
|
||||||
|
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||||
|
|
||||||
|
let upid_str = WorkerTask::new_thread(
|
||||||
|
"tape-restore",
|
||||||
|
Some(store.clone()),
|
||||||
|
auth_id.clone(),
|
||||||
|
to_stdout,
|
||||||
|
move |worker| {
|
||||||
|
|
||||||
|
let _lock = MediaPool::lock(status_path, &pool)?;
|
||||||
|
|
||||||
|
let members = inventory.compute_media_set_members(&media_set_uuid)?;
|
||||||
|
|
||||||
|
let media_list = members.media_list();
|
||||||
|
|
||||||
|
let mut media_id_list = Vec::new();
|
||||||
|
|
||||||
|
let mut encryption_key_fingerprint = None;
|
||||||
|
|
||||||
|
for (seq_nr, media_uuid) in media_list.iter().enumerate() {
|
||||||
|
match media_uuid {
|
||||||
|
None => {
|
||||||
|
bail!("media set {} is incomplete (missing member {}).", media_set_uuid, seq_nr);
|
||||||
|
}
|
||||||
|
Some(media_uuid) => {
|
||||||
|
let media_id = inventory.lookup_media(media_uuid).unwrap();
|
||||||
|
if let Some(ref set) = media_id.media_set_label { // always true here
|
||||||
|
if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() {
|
||||||
|
encryption_key_fingerprint = set.encryption_key_fingerprint.clone();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
media_id_list.push(media_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.log(format!("Restore mediaset '{}'", media_set));
|
||||||
|
if let Some(fingerprint) = encryption_key_fingerprint {
|
||||||
|
worker.log(format!("Encryption key fingerprint: {}", fingerprint));
|
||||||
|
}
|
||||||
|
worker.log(format!("Pool: {}", pool));
|
||||||
|
worker.log(format!("Datastore: {}", store));
|
||||||
|
worker.log(format!("Drive: {}", drive));
|
||||||
|
worker.log(format!(
|
||||||
|
"Required media list: {}",
|
||||||
|
media_id_list.iter()
|
||||||
|
.map(|media_id| media_id.label.label_text.as_str())
|
||||||
|
.collect::<Vec<&str>>()
|
||||||
|
.join(";")
|
||||||
|
));
|
||||||
|
|
||||||
|
for media_id in media_id_list.iter() {
|
||||||
|
request_and_restore_media(
|
||||||
|
&worker,
|
||||||
|
media_id,
|
||||||
|
&drive_config,
|
||||||
|
&drive,
|
||||||
|
&datastore,
|
||||||
|
&auth_id,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.log(format!("Restore mediaset '{}' done", media_set));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(upid_str.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Request and restore complete media without using existing catalog (create catalog instead)
|
||||||
|
pub fn request_and_restore_media(
|
||||||
|
worker: &WorkerTask,
|
||||||
|
media_id: &MediaId,
|
||||||
|
drive_config: &SectionConfigData,
|
||||||
|
drive_name: &str,
|
||||||
|
datastore: &DataStore,
|
||||||
|
authid: &Authid,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let media_set_uuid = match media_id.media_set_label {
|
||||||
|
None => bail!("restore_media: no media set - internal error"),
|
||||||
|
Some(ref set) => &set.uuid,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (mut drive, info) = request_and_load_media(worker, &drive_config, &drive_name, &media_id.label)?;
|
||||||
|
|
||||||
|
match info.media_set_label {
|
||||||
|
None => {
|
||||||
|
bail!("missing media set label on media {} ({})",
|
||||||
|
media_id.label.label_text, media_id.label.uuid);
|
||||||
|
}
|
||||||
|
Some(ref set) => {
|
||||||
|
if &set.uuid != media_set_uuid {
|
||||||
|
bail!("wrong media set label on media {} ({} != {})",
|
||||||
|
media_id.label.label_text, media_id.label.uuid,
|
||||||
|
media_set_uuid);
|
||||||
|
}
|
||||||
|
let encrypt_fingerprint = set.encryption_key_fingerprint.clone()
|
||||||
|
.map(|fp| (fp, set.uuid.clone()));
|
||||||
|
|
||||||
|
drive.set_encryption(encrypt_fingerprint)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Restore complete media content and catalog
|
||||||
|
///
|
||||||
|
/// Only create the catalog if target is None.
|
||||||
|
pub fn restore_media(
|
||||||
|
worker: &WorkerTask,
|
||||||
|
drive: &mut Box<dyn TapeDriver>,
|
||||||
|
media_id: &MediaId,
|
||||||
|
target: Option<(&DataStore, &Authid)>,
|
||||||
|
verbose: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let status_path = Path::new(TAPE_STATUS_DIR);
|
||||||
|
let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let current_file_number = drive.current_file_number()?;
|
||||||
|
let reader = match drive.read_next_file()? {
|
||||||
|
None => {
|
||||||
|
worker.log(format!("detected EOT after {} files", current_file_number));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Some(reader) => reader,
|
||||||
|
};
|
||||||
|
|
||||||
|
restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {

    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    worker.log(format!("restore snapshot {}", backup_dir));

                    match restore_snapshot_archive(reader, &path) {
                        Err(err) => {
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            std::fs::remove_dir_all(&path)?;
                            worker.log(format!("skip incomplete snapshot {}", backup_dir));
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
            }

            reader.skip_to_end()?; // read all data
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {

            worker.log(format!("Found chunk archive: {}", current_file_number));
            let datastore = target.as_ref().map(|t| t.0);

            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                worker.log(format!("register {} chunks", chunks.len()));
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}

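restore_archive dispatches purely on the content_magic field of the header it just read. A compact sketch of that dispatch with hypothetical placeholder constants (the real magics are 8-byte arrays), showing why an unknown magic must be a hard error - the reader cannot safely skip content it cannot identify:

// Hypothetical placeholder magics for illustration only.
const LABEL_MAGIC: u64 = 1;
const SNAPSHOT_MAGIC: u64 = 2;
const CHUNK_MAGIC: u64 = 3;

fn dispatch(content_magic: u64) -> Result<&'static str, String> {
    match content_magic {
        // labels live at the start of the tape, never in the data area
        LABEL_MAGIC => Err("unexpected label in data area".to_string()),
        SNAPSHOT_MAGIC => Ok("restore snapshot archive"),
        CHUNK_MAGIC => Ok("restore chunk archive"),
        other => Err(format!("unknown content magic {:?}", other)),
    }
}
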
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        while let Some((digest, blob)) = decoder.next_chunk()? {
            if let Some(datastore) = datastore {
                let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                if !chunk_exists {
                    blob.verify_crc()?;

                    if blob.crypt_mode()? == CryptMode::None {
                        blob.decode(None, Some(&digest))?; // verify digest
                    }
                    if verbose {
                        worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                    }
                    datastore.insert_chunk(&blob, &digest)?;
                } else if verbose {
                    worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                }
            } else if verbose {
                worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
            }
            chunks.push(digest);
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                worker.log("missing stream end marker".to_string());
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

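The error path of restore_chunk_archive distinguishes three outcomes, and the order of the checks matters: an incomplete-but-marked stream keeps the chunks read so far, a stream with no end marker is discarded, and anything else is treated as corruption. A sketch of that triage as a standalone function (hypothetical names, condensed from the match above):

// Hypothetical condensed form of the error triage above.
fn triage<T>(
    partial: Vec<T>,
    incomplete: bool,
    has_end_marker: bool,
    err: std::io::Error,
) -> Result<Option<Vec<T>>, std::io::Error> {
    if incomplete {
        return Ok(Some(partial)); // salvage what was read before the cut
    }
    if !has_end_marker {
        return Ok(None); // aborted stream: discard the whole archive
    }
    Err(err) // end marker present but decode failed: real corruption
}
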
fn restore_snapshot_archive<'a>(
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    match try_restore_snapshot_archive(&mut decoder, snapshot_path) {
        Ok(()) => Ok(true),
        Err(err) => {
            let reader = decoder.input();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(false);
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                return Ok(false);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}

fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {

            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}

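try_restore_snapshot_archive writes every archive to a `.tmp` path first and only renames it into place once complete; the manifest rename at the very end is what commits the whole snapshot. A minimal sketch of that pattern (the sync_all call is an extra durability step of this sketch, not taken from the code above):

use std::io::Write;
use std::path::Path;

// Write-to-temp-then-rename: readers never observe a partial file,
// because rename within one filesystem is atomic on Linux.
fn write_atomic(path: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = path.with_extension("tmp");
    let mut file = std::fs::File::create(&tmp)?;
    file.write_all(data)?;
    file.sync_all()?; // assumption: flush to disk before publishing
    std::fs::rename(&tmp, path)
}
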
@@ -1,3 +1,5 @@
 //! API Type Definitions

 use anyhow::bail;
 use serde::{Deserialize, Serialize};

@@ -5,8 +7,15 @@ use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
 use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};

-use crate::backup::{CryptMode, Fingerprint, BACKUP_ID_REGEX};
-use crate::server::UPID;
+use crate::{
+    backup::{
+        CryptMode,
+        Fingerprint,
+        BACKUP_ID_REGEX,
+    },
+    server::UPID,
+    config::acl::Role,
+};

 #[macro_use]
 mod macros;
@@ -20,6 +29,9 @@ pub use userid::Userid;
 pub use userid::Authid;
 pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};

+mod tape;
+pub use tape::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -74,7 +86,7 @@ const_regex!{

 pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");

-pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
+pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";

 pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");

@@ -83,6 +95,8 @@ const_regex!{
 pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";

 pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";

+pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
 }

 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -100,8 +114,8 @@ pub const IP_FORMAT: ApiStringFormat =
 pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

-pub const CERT_FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&CERT_FINGERPRINT_SHA256_REGEX);
+pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);

 pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
@@ -109,6 +123,9 @@ pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
 pub const BACKUP_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BACKUP_ID_REGEX);

+pub const UUID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&UUID_REGEX);
+
 pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);

@@ -160,17 +177,22 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
 pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
     "X509 certificate fingerprint (sha256)."
 )
-    .format(&CERT_FINGERPRINT_SHA256_FORMAT)
+    .format(&FINGERPRINT_SHA256_FORMAT)
     .schema();

-pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(r#"\
-Prevent changes if current configuration file has different SHA256 digest.
-This can be used to prevent concurrent modifications.
-"#
-)
-    .format(&PVE_CONFIG_DIGEST_FORMAT)
-    .schema();
+pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
+    "Tape encryption key fingerprint (sha256)."
+)
+    .format(&FINGERPRINT_SHA256_FORMAT)
+    .schema();
+
+pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
+    "Prevent changes if current configuration file has different \
+    SHA256 digest. This can be used to prevent concurrent \
+    modifications."
+)
+    .format(&PVE_CONFIG_DIGEST_FORMAT)
+    .schema();

 pub const CHUNK_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
@@ -269,6 +291,36 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
         EnumEntry::new("group", "Group")]))
     .schema();

+#[api(
+    properties: {
+        propagate: {
+            schema: ACL_PROPAGATE_SCHEMA,
+        },
+        path: {
+            schema: ACL_PATH_SCHEMA,
+        },
+        ugid_type: {
+            schema: ACL_UGID_TYPE_SCHEMA,
+        },
+        ugid: {
+            type: String,
+            description: "User or Group ID.",
+        },
+        roleid: {
+            type: Role,
+        }
+    }
+)]
+#[derive(Serialize, Deserialize)]
+/// ACL list entry.
+pub struct AclListItem {
+    pub path: String,
+    pub ugid: String,
+    pub ugid_type: String,
+    pub propagate: bool,
+    pub roleid: String,
+}
+
 pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
     StringSchema::new("Backup archive name.")
     .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -302,6 +354,16 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();

+pub const MEDIA_SET_UUID_SCHEMA: Schema =
+    StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).")
+    .format(&UUID_FORMAT)
+    .schema();
+
+pub const MEDIA_UUID_SCHEMA: Schema =
+    StringSchema::new("Media Uuid.")
+    .format(&UUID_FORMAT)
+    .schema();
+
 pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run sync job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
@@ -1069,7 +1131,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
     ];

     for fingerprint in invalid_fingerprints.iter() {
-        if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+        if parse_simple_value(fingerprint, &schema).is_ok() {
             bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
         }
     }
@@ -1110,7 +1172,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];

     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+        if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
             bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
         }
     }
@@ -1241,3 +1303,60 @@ pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
     "Datastore notification setting")
     .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
     .schema();
+
+pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(1)
+    .max_length(64)
+    .schema();
+
+#[api(default: "scrypt")]
+#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+/// Key derivation function for password protected encryption keys.
+pub enum Kdf {
+    /// Do not encrypt the key.
+    None,
+    /// Encrypt the key with a password using SCrypt.
+    Scrypt,
+    /// Encrypt the key with a password using PBKDF2.
+    PBKDF2,
+}
+
+impl Default for Kdf {
+    #[inline]
+    fn default() -> Self {
+        Kdf::Scrypt
+    }
+}
+
+#[api(
+    properties: {
+        kdf: {
+            type: Kdf,
+        },
+        fingerprint: {
+            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
+            optional: true,
+        },
+    },
+)]
+#[derive(Deserialize, Serialize)]
+/// Encryption Key Information
+pub struct KeyInfo {
+    /// Path to key (if stored in a file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub path: Option<String>,
+    pub kdf: Kdf,
+    /// Key creation time
+    pub created: i64,
+    /// Key modification time
+    pub modified: i64,
+    /// Key fingerprint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub fingerprint: Option<String>,
+    /// Password hint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub hint: Option<String>,
+}
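Since KeyInfo skips None fields during serialization, API responses only carry the optional attributes that are actually set. A hypothetical round trip, assuming serde_json is available (field values invented for illustration):

let info = KeyInfo {
    path: None,
    kdf: Kdf::Scrypt,
    created: 1609459200,
    modified: 1609459200,
    fingerprint: None,
    hint: Some("stored in the office safe".to_string()),
};
// path and fingerprint are omitted entirely, not rendered as null:
// {"kdf":"scrypt","created":1609459200,"modified":1609459200,"hint":"stored in the office safe"}
println!("{}", serde_json::to_string(&info).unwrap());
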
src/api2/types/tape/changer.rs | 132 lines (new file)
@@ -0,0 +1,132 @@
//! Types for tape changer API

use serde::{Deserialize, Serialize};

use proxmox::api::{
    api,
    schema::{
        Schema,
        ApiStringFormat,
        ArraySchema,
        IntegerSchema,
        StringSchema,
    },
};

use crate::api2::types::{
    PROXMOX_SAFE_ID_FORMAT,
    OptionalDeviceIdentification,
};

pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const SCSI_CHANGER_PATH_SCHEMA: Schema = StringSchema::new(
    "Path to Linux generic SCSI device (e.g. '/dev/sg4')")
    .schema();

pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Slot list.", &IntegerSchema::new("Slot number")
        .minimum(1)
        .schema())
    .schema();

pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(r###"\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots are considered to be
'offline'.
"###)
    .format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
    .schema();

#[api(
    properties: {
        name: {
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: SCSI_CHANGER_PATH_SCHEMA,
        },
        "export-slots": {
            schema: EXPORT_SLOT_LIST_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub export_slots: Option<String>,
}

#[api(
    properties: {
        config: {
            type: ScsiTapeChanger,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
    #[serde(flatten)]
    pub config: ScsiTapeChanger,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
}

#[api()]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
    /// Drive
    Drive,
    /// Slot
    Slot,
    /// Import/Export Slot
    ImportExport,
}

#[api(
    properties: {
        "entry-kind": {
            type: MtxEntryKind,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
    pub entry_kind: MtxEntryKind,
    /// The ID of the slot or drive
    pub entry_id: u64,
    /// The media label (volume tag) if the slot/drive is full
    #[serde(skip_serializing_if="Option::is_none")]
    pub label_text: Option<String>,
    /// The slot the drive was loaded from
    #[serde(skip_serializing_if="Option::is_none")]
    pub loaded_slot: Option<u64>,
}

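The kebab-case rename on MtxStatusEntry (and on MtxEntryKind) defines the JSON wire format of the changer status API. A hypothetical example entry and the shape it serializes to, assuming serde_json is available:

let entry = MtxStatusEntry {
    entry_kind: MtxEntryKind::ImportExport,
    entry_id: 15,
    label_text: Some("CLN001".to_string()), // hypothetical barcode
    loaded_slot: None,                      // omitted from output
};
// {"entry-kind":"import-export","entry-id":15,"label-text":"CLN001"}
println!("{}", serde_json::to_string(&entry).unwrap());
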
src/api2/types/tape/device.rs | 55 lines (new file)
@@ -0,0 +1,55 @@
use ::serde::{Deserialize, Serialize};

use proxmox::api::api;

#[api()]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
    /// Vendor (autodetected)
    #[serde(skip_serializing_if="Option::is_none")]
    pub vendor: Option<String>,
    /// Model (autodetected)
    #[serde(skip_serializing_if="Option::is_none")]
    pub model: Option<String>,
    /// Serial number (autodetected)
    #[serde(skip_serializing_if="Option::is_none")]
    pub serial: Option<String>,
}

#[api()]
#[derive(Debug,Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
    /// Tape changer (Autoloader, Robot)
    Changer,
    /// Normal SCSI tape device
    Tape,
}

#[api(
    properties: {
        kind: {
            type: DeviceKind,
        },
    },
)]
#[derive(Debug,Serialize,Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
    pub kind: DeviceKind,
    /// Path to the linux device node
    pub path: String,
    /// Serial number (autodetected)
    pub serial: String,
    /// Vendor (autodetected)
    pub vendor: String,
    /// Model (autodetected)
    pub model: String,
    /// Device major number
    pub major: u32,
    /// Device minor number
    pub minor: u32,
}

src/api2/types/tape/drive.rs | 211 lines (new file)
@@ -0,0 +1,211 @@
//! Types for tape drive API
use std::convert::TryFrom;

use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};

use proxmox::api::{
    api,
    schema::{Schema, IntegerSchema, StringSchema},
};

use crate::api2::types::{
    PROXMOX_SAFE_ID_FORMAT,
    CHANGER_NAME_SCHEMA,
    OptionalDeviceIdentification,
};

pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const LINUX_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
    "The path to a LINUX non-rewinding SCSI tape device (i.e. '/dev/nst0')")
    .schema();

pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
    "Associated changer drive number (requires option changer)")
    .minimum(0)
    .maximum(8)
    .default(0)
    .schema();

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        }
    }
)]
#[derive(Serialize,Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
    pub name: String,
    /// Path to directory
    pub path: String,
    /// Virtual tape size
    #[serde(skip_serializing_if="Option::is_none")]
    pub max_size: Option<usize>,
}

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LINUX_DRIVE_PATH_SCHEMA,
        },
        changer: {
            schema: CHANGER_NAME_SCHEMA,
            optional: true,
        },
        "changer-drivenum": {
            schema: CHANGER_DRIVENUM_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Linux SCSI tape driver
pub struct LinuxTapeDrive {
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub changer: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub changer_drivenum: Option<u64>,
}

#[api(
    properties: {
        config: {
            type: LinuxTapeDrive,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
    #[serde(flatten)]
    pub config: LinuxTapeDrive,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
}

#[api()]
#[derive(Serialize,Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
    /// Attribute id
    pub id: u16,
    /// Attribute name
    pub name: String,
    /// Attribute value
    pub value: String,
}

#[api()]
#[derive(Serialize,Deserialize,Copy,Clone,Debug)]
pub enum TapeDensity {
    /// LTO1
    LTO1,
    /// LTO2
    LTO2,
    /// LTO3
    LTO3,
    /// LTO4
    LTO4,
    /// LTO5
    LTO5,
    /// LTO6
    LTO6,
    /// LTO7
    LTO7,
    /// LTO7M8
    LTO7M8,
    /// LTO8
    LTO8,
}

impl TryFrom<u8> for TapeDensity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let density = match value {
            0x40 => TapeDensity::LTO1,
            0x42 => TapeDensity::LTO2,
            0x44 => TapeDensity::LTO3,
            0x46 => TapeDensity::LTO4,
            0x58 => TapeDensity::LTO5,
            0x5a => TapeDensity::LTO6,
            0x5c => TapeDensity::LTO7,
            0x5d => TapeDensity::LTO7M8,
            0x5e => TapeDensity::LTO8,
            _ => bail!("unknown tape density code 0x{:02x}", value),
        };
        Ok(density)
    }
}

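A quick sanity check of the density-code mapping above; the codes are the standard SCSI density codes for LTO generations, and anything outside the table is rejected:

use std::convert::TryFrom;

// 0x58 maps to LTO5 and 0x5e to LTO8 per the match arms above.
assert!(matches!(TapeDensity::try_from(0x58u8), Ok(TapeDensity::LTO5)));
assert!(matches!(TapeDensity::try_from(0x5eu8), Ok(TapeDensity::LTO8)));
assert!(TapeDensity::try_from(0x00u8).is_err()); // unknown code
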
#[api(
    properties: {
        density: {
            type: TapeDensity,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for Linux SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LinuxDriveAndMediaStatus {
    /// Block size (0 is variable size)
    pub blocksize: u32,
    /// Tape density
    #[serde(skip_serializing_if="Option::is_none")]
    pub density: Option<TapeDensity>,
    /// Status flags
    pub status: String,
    /// Linux Driver Options
    pub options: String,
    /// Tape Alert Flags
    #[serde(skip_serializing_if="Option::is_none")]
    pub alert_flags: Option<String>,
    /// Current file number
    #[serde(skip_serializing_if="Option::is_none")]
    pub file_number: Option<u32>,
    /// Current block number
    #[serde(skip_serializing_if="Option::is_none")]
    pub block_number: Option<u32>,
    /// Medium Manufacture Date (epoch)
    #[serde(skip_serializing_if="Option::is_none")]
    pub manufactured: Option<i64>,
    /// Total Bytes Read in Medium Life
    #[serde(skip_serializing_if="Option::is_none")]
    pub bytes_read: Option<u64>,
    /// Total Bytes Written in Medium Life
    #[serde(skip_serializing_if="Option::is_none")]
    pub bytes_written: Option<u64>,
    /// Number of mounts for the current volume (i.e., Thread Count)
    #[serde(skip_serializing_if="Option::is_none")]
    pub volume_mounts: Option<u64>,
    /// Count of the total number of times the medium has passed over
    /// the head.
    #[serde(skip_serializing_if="Option::is_none")]
    pub medium_passes: Option<u64>,
    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
    #[serde(skip_serializing_if="Option::is_none")]
    pub medium_wearout: Option<f64>,
}

src/api2/types/tape/media.rs | 151 lines (new file)
@@ -0,0 +1,151 @@
use ::serde::{Deserialize, Serialize};

use proxmox::{
    api::api,
    tools::Uuid,
};

use crate::api2::types::{
    MEDIA_UUID_SCHEMA,
    MEDIA_SET_UUID_SCHEMA,
    MediaStatus,
    MediaLocation,
};

#[api(
    properties: {
        location: {
            type: MediaLocation,
        },
        status: {
            type: MediaStatus,
        },
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    pub uuid: Uuid,
    /// Creation time stamp
    pub ctime: i64,
    pub location: MediaLocation,
    pub status: MediaStatus,
    /// Expired flag
    pub expired: bool,
    /// Catalog status OK
    pub catalog: bool,
    /// Media set name
    #[serde(skip_serializing_if="Option::is_none")]
    pub media_set_name: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// Media set seq_nr
    #[serde(skip_serializing_if="Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet creation time stamp
    #[serde(skip_serializing_if="Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Media Pool
    #[serde(skip_serializing_if="Option::is_none")]
    pub pool: Option<String>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
    /// Unique ID
    pub uuid: Uuid,
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Creation time stamp
    pub ctime: i64,
    // All MediaSet properties are optional here
    /// MediaSet Pool
    #[serde(skip_serializing_if="Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// MediaSet media sequence number
    #[serde(skip_serializing_if="Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet Creation time stamp
    #[serde(skip_serializing_if="Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Encryption key fingerprint
    #[serde(skip_serializing_if="Option::is_none")]
    pub encryption_key_fingerprint: Option<String>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
    /// Changer label text (or Barcode)
    pub label_text: String,
    /// Associated Uuid (if any)
    pub uuid: Option<Uuid>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Media Uuid
    pub uuid: Uuid,
    /// Media set name
    pub media_set_name: String,
    /// Media set uuid
    pub media_set_uuid: Uuid,
    /// MediaSet Creation time stamp
    pub media_set_ctime: i64,
    /// Media set seq_nr
    pub seq_nr: u64,
    /// Media Pool
    pub pool: String,
    /// Backup snapshot
    pub snapshot: String,
    /// Snapshot creation time (epoch)
    pub backup_time: i64,
}

src/api2/types/tape/media_location.rs | 91 lines (new file)
@@ -0,0 +1,91 @@
use anyhow::{bail, Error};

use proxmox::api::{
    schema::{
        Schema,
        StringSchema,
        ApiStringFormat,
        parse_simple_value,
    },
};

use crate::api2::types::{
    PROXMOX_SAFE_ID_FORMAT,
    CHANGER_NAME_SCHEMA,
};

pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[derive(Debug, PartialEq, Clone)]
/// Media location
pub enum MediaLocation {
    /// Ready for use (inside tape library)
    Online(String),
    /// Locally available, but needs to be mounted (insert into tape
    /// drive)
    Offline,
    /// Media is inside a Vault
    Vault(String),
}

proxmox::forward_deserialize_to_from_str!(MediaLocation);
proxmox::forward_serialize_to_display!(MediaLocation);

impl MediaLocation {
    pub const API_SCHEMA: Schema = StringSchema::new(
        "Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
        .format(&ApiStringFormat::VerifyFn(|text| {
            let location: MediaLocation = text.parse()?;
            match location {
                MediaLocation::Online(ref changer) => {
                    parse_simple_value(changer, &CHANGER_NAME_SCHEMA)?;
                }
                MediaLocation::Vault(ref vault) => {
                    parse_simple_value(vault, &VAULT_NAME_SCHEMA)?;
                }
                MediaLocation::Offline => { /* OK */ }
            }
            Ok(())
        }))
        .schema();
}


impl std::fmt::Display for MediaLocation {

    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MediaLocation::Offline => {
                write!(f, "offline")
            }
            MediaLocation::Online(changer) => {
                write!(f, "online-{}", changer)
            }
            MediaLocation::Vault(vault) => {
                write!(f, "vault-{}", vault)
            }
        }
    }
}

impl std::str::FromStr for MediaLocation {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "offline" {
            return Ok(MediaLocation::Offline);
        }
        if let Some(changer) = s.strip_prefix("online-") {
            return Ok(MediaLocation::Online(changer.to_string()));
        }
        if let Some(vault) = s.strip_prefix("vault-") {
            return Ok(MediaLocation::Vault(vault.to_string()));
        }

        bail!("MediaLocation parse error");
    }
}

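With the `vault-` prefix constructing the Vault variant, Display and FromStr are inverses, which is what forward_serialize_to_display and forward_deserialize_to_from_str rely on. A short round-trip check with hypothetical changer and vault names:

// Round trip: parse then re-render yields the original string.
let loc: MediaLocation = "vault-offsite1".parse().unwrap();
assert_eq!(loc, MediaLocation::Vault("offsite1".to_string()));
assert_eq!(loc.to_string(), "vault-offsite1");

let loc: MediaLocation = "online-autoloader0".parse().unwrap();
assert_eq!(loc.to_string(), "online-autoloader0");
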
src/api2/types/tape/media_pool.rs | 158 lines (new file)
@@ -0,0 +1,158 @@
//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.

use anyhow::Error;
use std::str::FromStr;
use serde::{Deserialize, Serialize};

use proxmox::api::{
    api,
    schema::{Schema, StringSchema, ApiStringFormat},
};

use crate::{
    tools::systemd::time::{
        CalendarEvent,
        TimeSpan,
        parse_time_span,
        parse_calendar_event,
    },
    api2::types::{
        PROXMOX_SAFE_ID_FORMAT,
        SINGLE_LINE_COMMENT_FORMAT,
        TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
    },
};

pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
    "Media set naming template.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat =
    ApiStringFormat::VerifyFn(|s| { MediaSetPolicy::from_str(s)?; Ok(()) });

pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema = StringSchema::new(
    "Media set allocation policy ('continue', 'always', or a calendar event).")
    .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
    .schema();

/// Media set allocation policy
pub enum MediaSetPolicy {
    /// Try to use the current media set
    ContinueCurrent,
    /// Each backup job creates a new media set
    AlwaysCreate,
    /// Create a new set when the specified CalendarEvent triggers
    CreateAt(CalendarEvent),
}

impl std::str::FromStr for MediaSetPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "continue" {
            return Ok(MediaSetPolicy::ContinueCurrent);
        }
        if s == "always" {
            return Ok(MediaSetPolicy::AlwaysCreate);
        }

        let event = parse_calendar_event(s)?;

        Ok(MediaSetPolicy::CreateAt(event))
    }
}

pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat =
    ApiStringFormat::VerifyFn(|s| { RetentionPolicy::from_str(s)?; Ok(()) });

pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema = StringSchema::new(
    "Media retention policy ('overwrite', 'keep', or time span).")
    .format(&MEDIA_RETENTION_POLICY_FORMAT)
    .schema();

/// Media retention Policy
pub enum RetentionPolicy {
    /// Always overwrite media
    OverwriteAlways,
    /// Protect data for the timespan specified
    ProtectFor(TimeSpan),
    /// Never overwrite data
    KeepForever,
}

impl std::str::FromStr for RetentionPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "overwrite" {
            return Ok(RetentionPolicy::OverwriteAlways);
        }
        if s == "keep" {
            return Ok(RetentionPolicy::KeepForever);
        }

        let time_span = parse_time_span(s)?;

        Ok(RetentionPolicy::ProtectFor(time_span))
    }
}

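Both policy parsers check their keywords first and only then fall back to a parsed value, so any string that is neither a keyword nor a valid calendar event / time span is rejected by the VerifyFn formats above. A sketch of the expected outcomes (the "30 days" syntax is an assumption about the time-span grammar in tools::systemd::time):

use std::str::FromStr;

assert!(matches!(RetentionPolicy::from_str("keep"), Ok(RetentionPolicy::KeepForever)));
assert!(matches!(RetentionPolicy::from_str("overwrite"), Ok(RetentionPolicy::OverwriteAlways)));
// anything else must parse as a time span, e.g. protect media for 30 days
assert!(matches!(RetentionPolicy::from_str("30 days"), Ok(RetentionPolicy::ProtectFor(_))));
assert!(matches!(MediaSetPolicy::from_str("always"), Ok(MediaSetPolicy::AlwaysCreate)));
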
#[api(
    properties: {
        name: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        allocation: {
            schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
            optional: true,
        },
        retention: {
            schema: MEDIA_RETENTION_POLICY_SCHEMA,
            optional: true,
        },
        template: {
            schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
            optional: true,
        },
        encrypt: {
            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize,Deserialize)]
/// Media pool configuration
pub struct MediaPoolConfig {
    /// The pool name
    pub name: String,
    /// Media Set allocation policy
    #[serde(skip_serializing_if="Option::is_none")]
    pub allocation: Option<String>,
    /// Media retention policy
    #[serde(skip_serializing_if="Option::is_none")]
    pub retention: Option<String>,
    /// Media set naming template (default "%c")
    ///
    /// The template is UTF8 text, and can include strftime time
    /// format specifications.
    #[serde(skip_serializing_if="Option::is_none")]
    pub template: Option<String>,
    /// Encryption key fingerprint
    ///
    /// If set, encrypt all data using the specified key.
    #[serde(skip_serializing_if="Option::is_none")]
    pub encrypt: Option<String>,
}

src/api2/types/tape/media_status.rs | 21 lines (new file)
@@ -0,0 +1,21 @@
use ::serde::{Deserialize, Serialize};

use proxmox::api::api;

#[api()]
#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media Status
pub enum MediaStatus {
    /// Media is ready to be written
    Writable,
    /// Media is full (contains data)
    Full,
    /// Media is marked as unknown, needs rescan
    Unknown,
    /// Media is marked as damaged
    Damaged,
    /// Media is marked as retired
    Retired,
}

src/api2/types/tape/mod.rs | 22 lines (new file)
@@ -0,0 +1,22 @@
//! Types for tape backup API

mod device;
pub use device::*;

mod changer;
pub use changer::*;

mod drive;
pub use drive::*;

mod media_pool;
pub use media_pool::*;

mod media_status;
pub use media_status::*;

mod media_location;
pub use media_location::*;

mod media;
pub use media::*;

@@ -16,10 +16,10 @@
 //! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`).
 //!   Note that `Userid` and `Authid` do not have a separate borrowed type.
 //!
-//! Note that `Username`s and `Tokenname`s are not unique, therefore they do not implement `Eq` and cannot be
+//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
 //! compared directly. If a direct comparison is really required, they can be compared as strings
-//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other
-//! hand can be compared with each other, as in those cases the comparison has meaning.
+//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other hand can be
+//! compared with each other, as in those cases the comparison has meaning.

 use std::borrow::Borrow;
 use std::convert::TryFrom;
@@ -277,7 +277,7 @@ impl PartialEq<&str> for RealmRef {

 impl PartialEq<RealmRef> for Realm {
     fn eq(&self, rhs: &RealmRef) -> bool {
-        self.0 == &rhs.0
+        self.0 == rhs.0
     }
 }

@@ -299,16 +299,8 @@ impl PartialEq<Realm> for &RealmRef {
 )]
 /// The token ID part of an API token authentication id.
 ///
-/// This alone does NOT uniquely identify the API token and therefore does not implement `Eq`. In
-/// order to compare token IDs directly, they need to be explicitly compared as strings by calling
-/// `.as_str()`.
-///
-/// ```compile_fail
-/// fn test(a: Tokenname, b: Tokenname) -> bool {
-///     a == b // illegal and does not compile
-/// }
-/// ```
-#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
+/// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases.
+#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
 pub struct Tokenname(String);

 /// A reference to a token name part of an authentication id. This alone does NOT uniquely identify
@@ -336,24 +328,6 @@ pub struct TokennameRef(str);
 /// let b: &UsernameRef = unsafe { std::mem::zeroed() };
 /// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
 /// ```
-///
-/// ```compile_fail
-/// let a: Tokenname = unsafe { std::mem::zeroed() };
-/// let b: Tokenname = unsafe { std::mem::zeroed() };
-/// let _ = <Tokenname as PartialEq>::eq(&a, &b);
-/// ```
-///
-/// ```compile_fail
-/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
-/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
-/// let _ = <&TokennameRef as PartialEq>::eq(a, b);
-/// ```
-///
-/// ```compile_fail
-/// let a: &TokennameRef = unsafe { std::mem::zeroed() };
-/// let b: &TokennameRef = unsafe { std::mem::zeroed() };
-/// let _ = <&TokennameRef as PartialEq>::eq(&a, &b);
-/// ```
 struct _AssertNoEqImpl;

 impl TokennameRef {
@@ -419,12 +393,10 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef {
 }

 /// A complete user id consisting of a user name and a realm
-#[derive(Clone, Debug, Hash)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct Userid {
     data: String,
     name_len: usize,
-    //name: Username,
-    //realm: Realm,
 }

 impl Userid {
@@ -460,14 +432,6 @@ lazy_static! {
     pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
 }

-impl Eq for Userid {}
-
-impl PartialEq for Userid {
-    fn eq(&self, rhs: &Self) -> bool {
-        self.data == rhs.data && self.name_len == rhs.name_len
-    }
-}
-
 impl From<Authid> for Userid {
     fn from(authid: Authid) -> Self {
         authid.user
@@ -558,7 +522,7 @@ impl PartialEq<String> for Userid {
 }

 /// A complete authentication id consisting of a user id and an optional token name.
-#[derive(Clone, Debug, Hash)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
 pub struct Authid {
     user: Userid,
     tokenname: Option<Tokenname>
@@ -600,18 +564,6 @@ lazy_static! {
     pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4));
 }

-impl Eq for Authid {}
-
-impl PartialEq for Authid {
-    fn eq(&self, rhs: &Self) -> bool {
-        self.user == rhs.user && match (&self.tokenname, &rhs.tokenname) {
-            (Some(ours), Some(theirs)) => ours.as_str() == theirs.as_str(),
-            (None, None) => true,
-            _ => false,
-        }
-    }
-}
-
 impl From<Userid> for Authid {
     fn from(parts: Userid) -> Self {
         Self::new(parts, None)
@@ -648,7 +600,7 @@ impl std::str::FromStr for Authid {
         .iter()
         .rposition(|&b| b == b'!')
         .map(|pos| if pos < name_len { id.len() } else { pos })
-        .unwrap_or(id.len());
+        .unwrap_or_else(|| id.len());

     if realm_end == id.len() - 1 {
         bail!("empty token name in userid");
@@ -680,7 +632,7 @@ impl TryFrom<String> for Authid {
         .iter()
         .rposition(|&b| b == b'!')
         .map(|pos| if pos < name_len { data.len() } else { pos })
-        .unwrap_or(data.len());
+        .unwrap_or_else(|| data.len());

     if realm_end == data.len() - 1 {
         bail!("empty token name in userid");
Some files were not shown because too many files have changed in this diff.