Compare commits
1093 Commits
Author | SHA1 | Date | |
---|---|---|---|
64394b0de8 | |||
2f617a4548 | |||
2ba64bed18 | |||
cafccb5991 | |||
b22e8c3632 | |||
7929292618 | |||
0d4e4cae7f | |||
f4ba2e3155 | |||
7101ed6e27 | |||
85ac35aa9a | |||
40590561fe | |||
631e550920 | |||
f806c0effa | |||
50a4797fb1 | |||
cc2a0b12f8 | |||
988e8de122 | |||
2f8809c6bc | |||
92b7775fa1 | |||
f4d231e70a | |||
b419050aa7 | |||
8937c65951 | |||
6c6ad82d90 | |||
d0f11b66f7 | |||
f9fcac51a5 | |||
ca953d831f | |||
01c023d50f | |||
c2113a405e | |||
5dae81d199 | |||
bd768c3320 | |||
572fc035a2 | |||
99b2f045af | |||
6248e51797 | |||
19e4a36c70 | |||
90769e5694 | |||
b8cbe5d65b | |||
35c95ca653 | |||
2dbc1a9a55 | |||
dceecb0bbf | |||
d690d14568 | |||
85ef624440 | |||
e995996290 | |||
8e6ad4301d | |||
86740dfc89 | |||
1399c592d1 | |||
9883b54cba | |||
83b8949a98 | |||
28f60e5291 | |||
1f31d06f48 | |||
2f2e83c890 | |||
b22c618734 | |||
1e041082bb | |||
a57ce270ac | |||
b5b99a52cd | |||
9586ce2f46 | |||
b8d526f18d | |||
d2edc68ead | |||
4d651378e2 | |||
58791864d7 | |||
1a41e9af4f | |||
c297835b01 | |||
e68269fcaf | |||
5243df4712 | |||
4470eba551 | |||
1f2c4713ef | |||
a6c16894ff | |||
271764deb9 | |||
52f7a73009 | |||
bdb6e6b83f | |||
41dacd5d3d | |||
eb1dfb02b5 | |||
1a0eb86344 | |||
bdb62b20a3 | |||
f2ca03d7d0 | |||
00ac86c31b | |||
627d000098 | |||
4be4736603 | |||
2da7aca8e8 | |||
8306b8b1a5 | |||
605cfd4ab1 | |||
dec3147501 | |||
c642aec128 | |||
fd9aa8dfa2 | |||
07d6c0967d | |||
80a3749088 | |||
c72fdb53ae | |||
b03ec281bf | |||
cef4654ff4 | |||
f45dceeb73 | |||
18262a88c9 | |||
87f4be7998 | |||
d737adc6be | |||
5fdaecf6f4 | |||
d8792b88ef | |||
8b1174f50a | |||
8c8f7b5a09 | |||
44915932d5 | |||
e90fdf5bed | |||
a11c8ab485 | |||
74a50158ca | |||
6ee85d57be | |||
b2fc6f9228 | |||
f91481eded | |||
651a61f559 | |||
b06edeca02 | |||
89ccb125d1 | |||
c972704477 | |||
887f1cb90c | |||
16b4d78400 | |||
ec8d9c6b80 | |||
49c2d1dcad | |||
d0f51651f9 | |||
481ccf16a5 | |||
a223458753 | |||
e1740f3f01 | |||
740dc9d1d4 | |||
bbf01b644c | |||
66d066964c | |||
c81c46c336 | |||
c3747b93c8 | |||
d43265b7f1 | |||
6864fd0149 | |||
340c0bf9e3 | |||
4d104cd4d8 | |||
367c0ff7c6 | |||
9c26a3d61a | |||
93e3581ce7 | |||
f4e52bb27d | |||
72064fd0df | |||
77486a608e | |||
e97025ab02 | |||
e43b9175c0 | |||
9cc1415ef5 | |||
bd215dc0e4 | |||
12e874cef0 | |||
6d233161b0 | |||
905a570489 | |||
432fe44187 | |||
51b938496d | |||
b7f9b25e4d | |||
fe61280b6b | |||
68c087d578 | |||
d6bf87cab7 | |||
2b96a43879 | |||
697c41c584 | |||
a2379996e6 | |||
29077d95db | |||
dbd00a57b0 | |||
d08cff51a4 | |||
3e461dec1c | |||
4d08e25913 | |||
43313c2ee7 | |||
81b2a87232 | |||
3d8cd0ced7 | |||
7c78d54231 | |||
f9d71e8b17 | |||
0107fd323c | |||
8ba47929a0 | |||
794b0fe9ce | |||
979dccc7ec | |||
44a5f38bc4 | |||
bf78f70885 | |||
545706cbee | |||
0d916ac531 | |||
d4ab407045 | |||
45212a8c78 | |||
64b83c3d70 | |||
639a6782bd | |||
5f34d69bcc | |||
337ff5a3cc | |||
8e6459a818 | |||
aff3e16194 | |||
9372c0787d | |||
83fb2da53e | |||
645a044bf6 | |||
37796ff73f | |||
e1fdcb1678 | |||
aab9a26409 | |||
958055a789 | |||
edda5039d4 | |||
1c86893d95 | |||
d543587d34 | |||
780bc4cad2 | |||
18bd6ba13d | |||
4dafc513cc | |||
7acd5c5659 | |||
8428063d9e | |||
f490dda05a | |||
2b191385ea | |||
bc228e5eaf | |||
8be65e34de | |||
d967d8f1a7 | |||
50deb0d3f8 | |||
1d928b25fe | |||
f2f81791d1 | |||
382f10a0cc | |||
0d2133db98 | |||
09faa9ee95 | |||
ccec086e25 | |||
05725ac9a4 | |||
96b7483138 | |||
81281d04a4 | |||
e062ebbc29 | |||
b92cad0938 | |||
ea368a06cd | |||
3f48cdb380 | |||
17c7b46a69 | |||
a375df6f4c | |||
a3775bb4e8 | |||
1e0c6194b5 | |||
a6bd669854 | |||
6334bdc1c5 | |||
3b82f3eea5 | |||
38556bf60d | |||
d8d8af9826 | |||
3984a5fd77 | |||
397356096a | |||
365915da9a | |||
87152fbac6 | |||
22a9189ee0 | |||
4428818412 | |||
47ea98e0e3 | |||
6dd0513546 | |||
8abe51b71d | |||
69b8bc3bfa | |||
301b8aa0a5 | |||
e5b6c93323 | |||
9a045790ed | |||
82a103c8f9 | |||
0123039271 | |||
9a0e115a37 | |||
867bfc4378 | |||
feb1645f37 | |||
8ca37d6a65 | |||
ac163a7c18 | |||
9b6bddb24c | |||
f57ae48286 | |||
4cbd7eb7f9 | |||
310686726a | |||
ad5cee1d22 | |||
bad6e32075 | |||
8ae6d28cd4 | |||
ca1060862e | |||
8a0046f519 | |||
84cbdb35c4 | |||
1e93fbb5c1 | |||
619554af2b | |||
d5a48b5ce4 | |||
4e9cc3e97c | |||
492bc2ba63 | |||
995492100a | |||
854319d88c | |||
3189d05134 | |||
b2a43b987c | |||
6676409f7f | |||
44de5bcc00 | |||
e2956c605d | |||
b22b6c2299 | |||
90950c9c20 | |||
0c5b9e7820 | |||
a9ffa010c8 | |||
a6a903293b | |||
3fffcb5d77 | |||
a670b99db1 | |||
aefd74197a | |||
9ff747ef50 | |||
a08a198577 | |||
4cfb123448 | |||
198ebc6c86 | |||
a8abcd9b30 | |||
b7469f5a9a | |||
6bbe49aa14 | |||
5aa1019010 | |||
29a59b380c | |||
0bfcea6a11 | |||
19f5aa252f | |||
89e9134a3f | |||
b5a202acb6 | |||
0f860f712f | |||
7c66701366 | |||
585e90c0de | |||
5c852d5b82 | |||
484172b5f8 | |||
d148958b67 | |||
0a8d773ad0 | |||
427d90e6c1 | |||
9b2e4079d0 | |||
1a0b410554 | |||
2d50a6192f | |||
781da7f6f0 | |||
646221cc29 | |||
b168a27f73 | |||
a442bd9792 | |||
884fec7735 | |||
1cb89f302f | |||
da36bbe756 | |||
25e464c5ce | |||
8446fbca85 | |||
9738dd545f | |||
0bce2118e7 | |||
6543214dde | |||
d91c6fd4e1 | |||
711d1f6fc3 | |||
e422beec74 | |||
a484c9cf96 | |||
5654d8ceba | |||
31cf625af5 | |||
93be18ffd2 | |||
e96464c795 | |||
ad0ed40a59 | |||
63fd8e58b2 | |||
758a827c2d | |||
7ad33e8052 | |||
abfe0c0e70 | |||
f22dfb5ece | |||
4bda51688b | |||
eab25e2f33 | |||
94bd11bae2 | |||
759af9f00c | |||
f58e5132aa | |||
d831846706 | |||
1fc9ac0433 | |||
5c48d0af1f | |||
30fb19be35 | |||
fbeac4ea28 | |||
7f066a9b21 | |||
c5a767cd1d | |||
027ef213aa | |||
dc1fdd6267 | |||
96918252e5 | |||
014dc5f9d7 | |||
59e94227af | |||
e84b801c2e | |||
6638c034d2 | |||
04df41cec1 | |||
483da89d03 | |||
c92e3832bf | |||
edb90f6afa | |||
0057f0e580 | |||
e6217b8b36 | |||
6fe16039b9 | |||
42967bf185 | |||
5843268c47 | |||
7273ba3de2 | |||
0bf1c314da | |||
c7926d8e8c | |||
44ce25e7ac | |||
3a2cc5c66e | |||
3838ce3330 | |||
59217472aa | |||
df69a4fc59 | |||
25d3965769 | |||
08d8b2a4fd | |||
879569d73f | |||
b63f833d36 | |||
482c6e33dd | |||
46a1863f88 | |||
632756b6fb | |||
04eba29c55 | |||
0912878ecf | |||
d5035c5600 | |||
38ae42b11a | |||
a174854a0d | |||
c4b2b9ab41 | |||
ef942e04c2 | |||
f54cd66924 | |||
b40ab10d38 | |||
f8ccbfdedd | |||
470f1c798a | |||
5c012b392a | |||
165b641c1d | |||
66e42bec05 | |||
c503ea7045 | |||
745ec187ce | |||
f046313c0e | |||
74595b8821 | |||
c9fdd142a4 | |||
abaa6d0ac9 | |||
cfae8f0656 | |||
54f4ecd46a | |||
1835d86e9d | |||
b9b4b31284 | |||
b4772d1c43 | |||
9933dc3133 | |||
08ac90f920 | |||
13f5863561 | |||
81764111fe | |||
cb022525ff | |||
75656a78c6 | |||
284eb5daff | |||
ff58c51919 | |||
2fb1bdda20 | |||
12299b333b | |||
b017bbc441 | |||
9e8c0d2e33 | |||
250c29edd2 | |||
c431659d05 | |||
a33389c391 | |||
3460565414 | |||
26b62138ee | |||
afb0220642 | |||
0993923ed5 | |||
e0362b0d0f | |||
df3a74d7e0 | |||
d5d457e667 | |||
b27c32821c | |||
76b15a035f | |||
eb8feb1281 | |||
fc6ce9835b | |||
8ae9f4efc2 | |||
c9d13b0fc4 | |||
bfacc1d8c3 | |||
02d484370f | |||
5ae86dfaa1 | |||
dbe7e556b0 | |||
4799280ccd | |||
cb4865466e | |||
cb80d900b3 | |||
ee01737e87 | |||
2012825913 | |||
eb5e3420ae | |||
b2362a1207 | |||
54d968664a | |||
1e20f819d5 | |||
8001c82e81 | |||
baefbc444e | |||
4a227b54bf | |||
8a192bedde | |||
d5efa18ae4 | |||
5f79dc2805 | |||
9aa58f0143 | |||
8835664653 | |||
d37da6b7fc | |||
b9ee86efe1 | |||
d108b610fd | |||
0ec79339f7 | |||
2afdc7f27d | |||
26aa9aca40 | |||
3e2984bcb9 | |||
a7a5406c32 | |||
4f727a783e | |||
23dc68fdea | |||
b532dd00c4 | |||
c01742855a | |||
9c953dd260 | |||
3fbf2d2fcd | |||
e0af222ec3 | |||
73b5011786 | |||
2ea5abcd65 | |||
7137630d43 | |||
8acfd15d6e | |||
48fbbfeb7e | |||
9990af3042 | |||
fe6c19383b | |||
42150d263b | |||
9839d3f778 | |||
dd59e3c2a1 | |||
0b7432ae09 | |||
c1c2c8f635 | |||
7680525eec | |||
42298d5896 | |||
39478aa52c | |||
6a99b930c4 | |||
f6ce45b373 | |||
205e187613 | |||
a78348acbb | |||
410611b4f2 | |||
af07ec8f29 | |||
3f803af00b | |||
ac461bd651 | |||
ce955e1635 | |||
e20d008c6a | |||
fb657d8ee5 | |||
fba0b77469 | |||
b5c1296eaa | |||
065df12872 | |||
7e1d4712b8 | |||
49c965a497 | |||
6fe9aedd0b | |||
42cb9bd6a5 | |||
66dbe5639e | |||
2d87f2fb73 | |||
4c81273274 | |||
73b8f6793e | |||
663ef85992 | |||
e92c75815b | |||
6dbad5b4b5 | |||
bff7e3f3e4 | |||
83abc7497d | |||
8bc5eebeb8 | |||
1433b96ba0 | |||
be1a8c94ae | |||
4606f34353 | |||
7bb720cb4d | |||
c4d8542ec1 | |||
9700d5374a | |||
05e90d6463 | |||
55118ca18e | |||
f70d8091d3 | |||
a3c709ef21 | |||
4917f1e2d4 | |||
93829fc680 | |||
5605ca5619 | |||
e49f0c03d9 | |||
0098b712a5 | |||
5fb694e8c0 | |||
583a68a446 | |||
e6604cf391 | |||
43cfb3c35a | |||
8a16c571d2 | |||
314652a499 | |||
6b68e5d597 | |||
cafd51bf42 | |||
eaff09f483 | |||
9b93c62044 | |||
5d90860688 | |||
5ba83ed099 | |||
50bf10ad56 | |||
16d444c979 | |||
fa9c9be737 | |||
2e7014e31d | |||
a84050c1f0 | |||
7c9835465e | |||
ec00200411 | |||
956e5fec1f | |||
b107fdb99a | |||
7320e9ff4b | |||
c4d2d54a6d | |||
1142350e8d | |||
d735b31345 | |||
e211fee562 | |||
8c15560b68 | |||
327e93711f | |||
a076571470 | |||
ff50c07ebf | |||
179145dc24 | |||
6bd0a00c46 | |||
f6e28f4e62 | |||
37f1b7dd8d | |||
60e6ee46de | |||
2260f065d4 | |||
6eff8dec4f | |||
7e25b9aaaa | |||
f867ef9c4a | |||
fc8920e35d | |||
7f3b0f67e7 | |||
844660036b | |||
efcac39d34 | |||
cb4b721cb0 | |||
7956877f14 | |||
2241c6795f | |||
43e60ceb41 | |||
b760d8a23f | |||
2c1592263d | |||
616533823c | |||
913dddea85 | |||
3530430365 | |||
a4ba60be8f | |||
99e98f605c | |||
935ee97b17 | |||
6b9bfd7fe9 | |||
dd519bbad1 | |||
35fe981c7d | |||
b6570abe79 | |||
54813c650e | |||
781106f8c5 | |||
96f35520a0 | |||
490560e0c6 | |||
52f53d8280 | |||
27b8a3f671 | |||
abf9b6da42 | |||
0c9209b04c | |||
edebd52374 | |||
61205f00fb | |||
a303e00289 | |||
af9f72e9d8 | |||
5176346b30 | |||
731eeef25b | |||
a65e3e4bc0 | |||
027eb2bbe6 | |||
6982a54701 | |||
035c40e638 | |||
79c535955d | |||
8b7f8d3f3d | |||
866c859a1e | |||
23e4e90540 | |||
a4fa3fc241 | |||
81d10c3b37 | |||
f1e2904150 | |||
23f9503a31 | |||
a0ef68b93c | |||
6b127e6ea0 | |||
5e17dbf2bb | |||
dfb04575ad | |||
6f2626ae19 | |||
37e60ddcde | |||
05cdc05347 | |||
6364115b4b | |||
2133cd9103 | |||
01f84fcce1 | |||
08b3823025 | |||
968a0ab261 | |||
21b552848a | |||
fd19256470 | |||
1ed022576c | |||
f6aa7b38bf | |||
fdfcb74d67 | |||
98afc7b152 | |||
0d08fceeb9 | |||
3c945d73c2 | |||
58fcbf5ab7 | |||
3a3f31c947 | |||
8fc63287df | |||
172473e4de | |||
76f549debb | |||
c9097ff801 | |||
fb01fd3af6 | |||
fa4bcbcad0 | |||
189cdb7427 | |||
874bd5454d | |||
b649887e9a | |||
8c62c15f56 | |||
51ac17b56e | |||
fc5a012068 | |||
5e293f1315 | |||
87367decf2 | |||
f792220dd4 | |||
97030c9407 | |||
5d1d0f5d6c | |||
294466ee61 | |||
c100fe9108 | |||
e754da3ac2 | |||
bc1e52bc38 | |||
6f0073bbb5 | |||
2decf85d6e | |||
1d8f849457 | |||
beb07279b6 | |||
8c6854c8fd | |||
57f472d9bb | |||
94ffca10a2 | |||
0a274ab0a0 | |||
c0026563b0 | |||
e411924c7c | |||
709c15abaa | |||
b404e4d930 | |||
f507580c3f | |||
291b786076 | |||
06c9059dac | |||
d7c6ad60dd | |||
0a0ba0785b | |||
6ed79592f2 | |||
4c75ee3471 | |||
6f997da8cd | |||
03e40aa4ee | |||
be1d6cbcc6 | |||
ffaca016ad | |||
71f82a98d7 | |||
deef6fbc0c | |||
4ac529141f | |||
a108a2e967 | |||
ff7a29104c | |||
240b2ffb9b | |||
a86e703661 | |||
1ecf4e6d20 | |||
9f9a661b1a | |||
1b1cab8321 | |||
f4f9a503de | |||
a4971d5f90 | |||
477ebe6b78 | |||
38efbfc148 | |||
10052ea644 | |||
b57619ea29 | |||
445b0043b2 | |||
8b62cbe752 | |||
81f99362d9 | |||
414c23facb | |||
c5608cf86c | |||
5d08c750ef | |||
f3fde36beb | |||
0c83e8891e | |||
133de2dd1f | |||
c8219747f0 | |||
0247f794e9 | |||
710f787c41 | |||
d8916a326c | |||
924d6d4072 | |||
984ac33d5c | |||
0a4dfd63c9 | |||
a6e746f652 | |||
30f73fa2e0 | |||
9f0ee346e9 | |||
48d6dede4a | |||
8432e4a655 | |||
b35eb0a175 | |||
c3a1b34ed3 | |||
bb26843cd6 | |||
ee0ab12dd0 | |||
d5f7755467 | |||
5c64e83b1e | |||
0f6f99b4ec | |||
f668862ae0 | |||
c960d2b501 | |||
f5d9f2534b | |||
9a3ddcea33 | |||
030464d3a9 | |||
3f30b32c2e | |||
5eafe6aabc | |||
2c9f274efa | |||
31112c79ac | |||
d89f91b538 | |||
a6310ec294 | |||
98d9323534 | |||
09f1f28800 | |||
e1da9ca4bb | |||
625c7bfc0b | |||
d9503950e3 | |||
376e927980 | |||
5204cbcf0f | |||
e373dcc564 | |||
137a6ebcad | |||
ed1329ecf7 | |||
2371c1e371 | |||
63c07d950c | |||
a3cdb19e33 | |||
4623cd6497 | |||
ab81bb13ad | |||
616650a198 | |||
78763d21b1 | |||
f2d6324958 | |||
6e880f19cc | |||
64623f329e | |||
407f3fb994 | |||
0eb0c4bd63 | |||
82422c115a | |||
ed2beb334d | |||
f3b4820d06 | |||
8f7cd96df4 | |||
4accbc5853 | |||
2791318ff1 | |||
47208b4147 | |||
b783591fb5 | |||
9dd6175808 | |||
5e8b97178e | |||
38260cddf5 | |||
80b0423d54 | |||
b690bb69eb | |||
8a40e22691 | |||
f5c6a2c956 | |||
6d5803399b | |||
3896f80cb3 | |||
60d2a6157a | |||
b83b12cf80 | |||
86847f487b | |||
1b03910dea | |||
435a6c5e0a | |||
1f4befe136 | |||
7f0f366675 | |||
362e69610c | |||
bad26df102 | |||
790627b4bf | |||
6de14a55ed | |||
8b24c6880a | |||
5174956548 | |||
d669a739b2 | |||
c7fa61619e | |||
009a04f8d0 | |||
0953044cfb | |||
d923671a7b | |||
db8a606707 | |||
b614b29bea | |||
65595e169f | |||
10db4717f1 | |||
1d9d2f0f7c | |||
ad53c1d6dd | |||
beeadb8a4b | |||
b997524912 | |||
cc4a9d250a | |||
6227b9bab0 | |||
f608e74c8b | |||
08379a21d1 | |||
8f1d972149 | |||
b59c308219 | |||
0224c3c273 | |||
f0609851fc | |||
dbd45a72c3 | |||
4c979d5450 | |||
35c80d696f | |||
6823fdc7f9 | |||
3323798b54 | |||
67fd09791f | |||
1b37ebf6f6 | |||
043406d662 | |||
61db0851d6 | |||
ad54df3178 | |||
71103afd69 | |||
6465d809cd | |||
ae8635c307 | |||
e0100d618e | |||
455e5f7110 | |||
c26c9390ff | |||
9e45e03aef | |||
e144810d73 | |||
3c2dd8ad05 | |||
91e3b38da4 | |||
9d79cec4d5 | |||
4935681cf4 | |||
669fa672d9 | |||
a797583535 | |||
54ed1b2a71 | |||
8e12e86f0b | |||
fe7bdc9d29 | |||
546b6a23df | |||
4fdf13f95f | |||
385681c9ab | |||
be99df2767 | |||
30200b5c4a | |||
f47c1d3a2f | |||
6e545d0058 | |||
84006f98b2 | |||
42ca9e918a | |||
ea93bea7bf | |||
0081903f7c | |||
c53797f627 | |||
e1d367df47 | |||
71f413cd27 | |||
48aa2b93b7 | |||
641862ddad | |||
2f08ee1fe3 | |||
93f077c5cf | |||
941342f70e | |||
9a556c8a30 | |||
46dce62be6 | |||
b0ef9631e6 | |||
fb0d9833af | |||
bfe4b7d782 | |||
185dab7678 | |||
c1fa057cce | |||
f66565203a | |||
a2a7dd1535 | |||
e7dd169fdf | |||
fa31f4c54c | |||
038ee59960 | |||
e1c1533790 | |||
9de7c71a81 | |||
aa64e06540 | |||
18077ac633 | |||
a71a009313 | |||
b6ba5acd29 | |||
4fdf5ddf5b | |||
c724f65805 | |||
79c9bf55b9 | |||
788d82d9b7 | |||
2f0b92352d | |||
b7f2be5137 | |||
72aa1834dc | |||
fe4cc5b1a1 | |||
04b053d87e | |||
b469011fd1 | |||
a68768cf31 | |||
f3df613cb7 | |||
056ee78567 | |||
3cd529ea51 | |||
3aade17125 | |||
1dc2fe20dd | |||
645a47ff6e | |||
b1456a8ea7 | |||
a9fcbec9dc | |||
346a488e35 | |||
3066f56481 | |||
07ca4e3609 | |||
dcd75edb72 | |||
59af9ca98e | |||
f1694b062d | |||
fa7aceeb15 | |||
0e16f57e37 | |||
bc00289bce | |||
86d602457a | |||
33508b1237 | |||
b282557563 | |||
e6513bd5de | |||
5911f74096 | |||
0bb74e54b1 | |||
f254a27071 | |||
d0abba3397 | |||
54adea366c | |||
ba2e4b15da | |||
0ccdd1b6a4 | |||
fb66c85363 | |||
aae4c30ceb | |||
0656344ae4 | |||
1143f6ca93 | |||
90e94aa280 | |||
c0af05e143 | |||
4aef06f1b6 | |||
034cf70b72 | |||
8b600f9965 | |||
e4e280183e | |||
2fc45a97a9 | |||
b7ce2e575f | |||
09f6a24078 | |||
b728a69e7d | |||
1401f4be5f | |||
fdb4416bae | |||
abe1edfc95 | |||
e4a864bd21 | |||
7a7368ee08 | |||
e707fd2b3b | |||
625a56b75e | |||
6d8a1ac9e4 | |||
362739054e | |||
2762481cc8 | |||
652506e6b8 | |||
926d253126 | |||
1cd951c93e | |||
3b707fbb8f | |||
b15751bf55 | |||
82c05b41fa | |||
b8d9079835 | |||
f8a682a873 | |||
b03a19b6e8 | |||
603a6bd183 | |||
83b039af35 | |||
c9299e76fc | |||
2f1a46f748 | |||
2b38dfb456 | |||
f487a622ce | |||
906ef6c5bd | |||
ea1853a17b | |||
221177ba41 | |||
184a37635b | |||
b2da7fbd1c | |||
7fe76d3491 | |||
e6b5bf69a3 | |||
4615325f9e | |||
2156dec5a9 | |||
16245d540c | |||
bff8557298 | |||
34aa8e13b6 | |||
babab85b56 | |||
6746bbb1a2 | |||
942078c40b | |||
c30816c1f8 | |||
e6dc35acb8 | |||
e10c5c74f6 | |||
f8adf8f83f | |||
e0538349e2 | |||
0903403ce7 | |||
b6563f48ad | |||
932390bd46 | |||
6b7688aa98 | |||
ab0cf7e6a1 | |||
264779e704 | |||
7f3d91003c | |||
14e0862509 | |||
9e733dae48 | |||
bfea476be2 | |||
385cf2bd9d | |||
d6373f3525 | |||
01f37e01c3 | |||
b4fb262335 | |||
5499bd3dee | |||
d771a608f5 | |||
227a39b34b | |||
f9beae9cc9 | |||
4430f199c4 | |||
eef18365e8 | |||
319fe45261 | |||
f26080fab1 | |||
0cbdeed96b | |||
8b4f4d9ee4 | |||
b9cc905761 | |||
c9725bb829 | |||
40492a562f | |||
db67e4fe06 | |||
b4b14dc16e | |||
c4a45ec744 | |||
5428f5ca29 | |||
328df3b507 | |||
a4915dfc2b | |||
d642802d8c | |||
a20fcab060 | |||
b9e7bcc272 | |||
acc3d9df5a | |||
1298618a83 | |||
a12388d177 | |||
1f092c7802 | |||
cd82870015 | |||
8d6b6a045f | |||
1dceaed1e9 | |||
2565fdd075 | |||
7ece65a01e | |||
028d0a1352 | |||
68931742cb | |||
3ea148598a | |||
cd92fd7336 | |||
d58e6313e1 | |||
16f9f244cf | |||
b683fd589c | |||
a2285525be | |||
f23497b088 | |||
b57b3c9bfc | |||
d3444c0891 | |||
d28e688666 | |||
72c0e102ff | |||
7b22fb257f | |||
2e201e7da6 | |||
ee89416319 | |||
2357744bd0 | |||
52fe9e8ece | |||
eed1bae554 | |||
6eb41487ce | |||
9e61c01ce4 | |||
91c9b42da3 | |||
52d2ae48f0 | |||
1872050564 | |||
efeb92efee | |||
4ebda996e5 | |||
5eb9dd0c8a | |||
12bcbf0734 | |||
dc2876f6bb | |||
bdc208af48 | |||
2ef1b6290f | |||
df0bdf6be7 | |||
8b47a23002 | |||
29615fe838 | |||
133042b5d8 | |||
73df9c515b | |||
8d1beca7e8 | |||
9b2bad7af0 | |||
78efafc2d0 | |||
2d3d91b1db | |||
030c5c6d8a | |||
53a561a222 | |||
e832860a3c | |||
804f61432d | |||
943479f5f6 | |||
fdce52aa99 | |||
4e32d1c590 | |||
afef7f3bba | |||
b428af9781 | |||
c8774067ee | |||
23440482d4 | |||
6f757b8458 | |||
95ade8fdb5 | |||
9e870b5f39 | |||
7827e3b93e | |||
e6ca9c3235 | |||
0698f78df5 | |||
bcc2880461 | |||
115d927c15 | |||
df729017b4 | |||
455f2ad228 | |||
e4f5f59eea | |||
16cdb9563b | |||
02479720c0 | |||
97168f920e | |||
9809772b23 | |||
4940012d0d | |||
0c2f9621d5 | |||
e7372972b5 | |||
e5adbc3419 | |||
41255b4d95 | |||
0c4c6a7b1c | |||
c7e18ba08a | |||
bb14d46796 | |||
e6475b09e0 | |||
d39d095fa4 | |||
86f3c2363c | |||
8e7e2223d8 | |||
081c37cccf | |||
c0df91f8bd | |||
400c568f8e | |||
4703ba81ce | |||
29633e2fe9 | |||
b64e9a97f3 | |||
254b1f2213 | |||
1a374fcfd6 | |||
e07620028d | |||
b947b1e7ee | |||
1e80fb8e92 | |||
8d841f81ee | |||
d9f365d79f | |||
32a4695c46 | |||
2081327428 | |||
4c0ae82e23 | |||
883aa6d5a4 | |||
bfa54f2e85 | |||
238a872d1f | |||
7d6c4c39e9 | |||
f153930066 | |||
836c4a278d | |||
6cd8496008 | |||
61c6eafc08 | |||
8db1468952 |
44
Cargo.toml
@ -1,7 +1,16 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "0.9.1"
|
version = "1.0.7"
|
||||||
authors = ["Dietmar Maurer <dietmar@proxmox.com>"]
|
authors = [
|
||||||
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
|
"Christian Ebner <c.ebner@proxmox.com>",
|
||||||
|
"Fabian Grünbichler <f.gruenbichler@proxmox.com>",
|
||||||
|
"Stefan Reiter <s.reiter@proxmox.com>",
|
||||||
|
"Thomas Lamprecht <t.lamprecht@proxmox.com>",
|
||||||
|
"Wolfgang Bumiller <w.bumiller@proxmox.com>",
|
||||||
|
"Proxmox Support Team <support@proxmox.com>",
|
||||||
|
]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3"
|
license = "AGPL-3"
|
||||||
description = "Proxmox Backup"
|
description = "Proxmox Backup"
|
||||||
@ -14,22 +23,22 @@ name = "proxmox_backup"
|
|||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
apt-pkg-native = "0.3.1" # custom patched version
|
apt-pkg-native = "0.3.2"
|
||||||
base64 = "0.12"
|
base64 = "0.12"
|
||||||
bitflags = "1.2.1"
|
bitflags = "1.2.1"
|
||||||
bytes = "0.5"
|
bytes = "1.0"
|
||||||
crc32fast = "1"
|
crc32fast = "1"
|
||||||
endian_trait = { version = "0.6", features = ["arrays"] }
|
endian_trait = { version = "0.6", features = ["arrays"] }
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
h2 = { version = "0.2", features = ["stream"] }
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
handlebars = "3.0"
|
handlebars = "3.0"
|
||||||
http = "0.2"
|
http = "0.2"
|
||||||
hyper = "0.13.6"
|
hyper = { version = "0.14", features = [ "full" ] }
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
nix = "0.16"
|
nix = "0.19.1"
|
||||||
num-traits = "0.2"
|
num-traits = "0.2"
|
||||||
once_cell = "1.3.1"
|
once_cell = "1.3.1"
|
||||||
openssl = "0.10"
|
openssl = "0.10"
|
||||||
@ -37,31 +46,34 @@ pam = "0.7"
|
|||||||
pam-sys = "0.5"
|
pam-sys = "0.5"
|
||||||
percent-encoding = "2.1"
|
percent-encoding = "2.1"
|
||||||
pin-utils = "0.1.0"
|
pin-utils = "0.1.0"
|
||||||
|
pin-project = "1.0"
|
||||||
pathpatterns = "0.1.2"
|
pathpatterns = "0.1.2"
|
||||||
proxmox = { version = "0.4.3", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
proxmox = { version = "0.10.1", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
||||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
||||||
proxmox-fuse = "0.1.0"
|
proxmox-fuse = "0.1.1"
|
||||||
pxar = { version = "0.6.1", features = [ "tokio-io", "futures-io" ] }
|
pxar = { version = "0.8.0", features = [ "tokio-io" ] }
|
||||||
#pxar = { path = "../pxar", features = [ "tokio-io", "futures-io" ] }
|
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
rustyline = "6"
|
rustyline = "7"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
siphasher = "0.3"
|
siphasher = "0.3"
|
||||||
syslog = "4.0"
|
syslog = "4.0"
|
||||||
tokio = { version = "0.2.9", features = [ "blocking", "fs", "dns", "io-util", "macros", "process", "rt-threaded", "signal", "stream", "tcp", "time", "uds" ] }
|
tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
|
||||||
tokio-openssl = "0.4.0"
|
tokio-openssl = "0.6.1"
|
||||||
tokio-util = { version = "0.3", features = [ "codec" ] }
|
tokio-stream = "0.1.0"
|
||||||
|
tokio-util = { version = "0.6", features = [ "codec" ] }
|
||||||
tower-service = "0.3.0"
|
tower-service = "0.3.0"
|
||||||
udev = ">= 0.3, <0.5"
|
udev = ">= 0.3, <0.5"
|
||||||
url = "2.1"
|
url = "2.1"
|
||||||
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
|
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
|
||||||
walkdir = "2"
|
walkdir = "2"
|
||||||
|
webauthn-rs = "0.2.5"
|
||||||
xdg = "2.2"
|
xdg = "2.2"
|
||||||
zstd = { version = "0.4", features = [ "bindgen" ] }
|
zstd = { version = "0.4", features = [ "bindgen" ] }
|
||||||
nom = "5.1"
|
nom = "5.1"
|
||||||
crossbeam-channel = "0.4"
|
crossbeam-channel = "0.5"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
|
8
Makefile
@ -9,7 +9,8 @@ SUBDIRS := etc www docs
|
|||||||
# Binaries usable by users
|
# Binaries usable by users
|
||||||
USR_BIN := \
|
USR_BIN := \
|
||||||
proxmox-backup-client \
|
proxmox-backup-client \
|
||||||
pxar
|
pxar \
|
||||||
|
pmtx
|
||||||
|
|
||||||
# Binaries usable by admins
|
# Binaries usable by admins
|
||||||
USR_SBIN := \
|
USR_SBIN := \
|
||||||
@ -19,7 +20,8 @@ USR_SBIN := \
|
|||||||
SERVICE_BIN := \
|
SERVICE_BIN := \
|
||||||
proxmox-backup-api \
|
proxmox-backup-api \
|
||||||
proxmox-backup-banner \
|
proxmox-backup-banner \
|
||||||
proxmox-backup-proxy
|
proxmox-backup-proxy \
|
||||||
|
proxmox-daily-update
|
||||||
|
|
||||||
ifeq ($(BUILD_MODE), release)
|
ifeq ($(BUILD_MODE), release)
|
||||||
CARGO_BUILD_ARGS += --release
|
CARGO_BUILD_ARGS += --release
|
||||||
@ -140,6 +142,8 @@ install: $(COMPILED_BINS)
|
|||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
|
||||||
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
|
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
|
||||||
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
|
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
|
||||||
|
# install sg-tape-cmd as setuid binary
|
||||||
|
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
|
||||||
$(foreach i,$(SERVICE_BIN), \
|
$(foreach i,$(SERVICE_BIN), \
|
||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
||||||
$(MAKE) -C www install
|
$(MAKE) -C www install
|
||||||
|
80
README.rst
@ -53,3 +53,83 @@ Setup:
|
|||||||
Note: 2. may be skipped if you already added the PVE or PBS package repository
|
Note: 2. may be skipped if you already added the PVE or PBS package repository
|
||||||
|
|
||||||
You are now able to build using the Makefile or cargo itself.
|
You are now able to build using the Makefile or cargo itself.
|
||||||
|
|
||||||
|
|
||||||
|
Design Notes
|
||||||
|
============
|
||||||
|
|
||||||
|
Here are some random thought about the software design (unless I find a better place).
|
||||||
|
|
||||||
|
|
||||||
|
Large chunk sizes
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
It is important to notice that large chunk sizes are crucial for
|
||||||
|
performance. We have a multi-user system, where different people can do
|
||||||
|
different operations on a datastore at the same time, and most operation
|
||||||
|
involves reading a series of chunks.
|
||||||
|
|
||||||
|
So what is the maximal theoretical speed we can get when reading a
|
||||||
|
series of chunks? Reading a chunk sequence need the following steps:
|
||||||
|
|
||||||
|
- seek to the first chunk start location
|
||||||
|
- read the chunk data
|
||||||
|
- seek to the first chunk start location
|
||||||
|
- read the chunk data
|
||||||
|
- ...
|
||||||
|
|
||||||
|
Lets use the following disk performance metrics:
|
||||||
|
|
||||||
|
:AST: Average Seek Time (second)
|
||||||
|
:MRS: Maximum sequential Read Speed (bytes/second)
|
||||||
|
:ACS: Average Chunk Size (bytes)
|
||||||
|
|
||||||
|
The maximum performance you can get is::
|
||||||
|
|
||||||
|
MAX(ACS) = ACS /(AST + ACS/MRS)
|
||||||
|
|
||||||
|
Please note that chunk data is likely to be sequential arranged on disk, but
|
||||||
|
this it is sort of a best case assumption.
|
||||||
|
|
||||||
|
For a typical rotational disk, we assume the following values::
|
||||||
|
|
||||||
|
AST: 10ms
|
||||||
|
MRS: 170MB/s
|
||||||
|
|
||||||
|
MAX(4MB) = 115.37 MB/s
|
||||||
|
MAX(1MB) = 61.85 MB/s;
|
||||||
|
MAX(64KB) = 6.02 MB/s;
|
||||||
|
MAX(4KB) = 0.39 MB/s;
|
||||||
|
MAX(1KB) = 0.10 MB/s;
|
||||||
|
|
||||||
|
Modern SSD are much faster, lets assume the following::
|
||||||
|
|
||||||
|
max IOPS: 20000 => AST = 0.00005
|
||||||
|
MRS: 500Mb/s
|
||||||
|
|
||||||
|
MAX(4MB) = 474 MB/s
|
||||||
|
MAX(1MB) = 465 MB/s;
|
||||||
|
MAX(64KB) = 354 MB/s;
|
||||||
|
MAX(4KB) = 67 MB/s;
|
||||||
|
MAX(1KB) = 18 MB/s;
|
||||||
|
|
||||||
|
|
||||||
|
Also, the average chunk directly relates to the number of chunks produced by
|
||||||
|
a backup::
|
||||||
|
|
||||||
|
CHUNK_COUNT = BACKUP_SIZE / ACS
|
||||||
|
|
||||||
|
Here are some staticics from my developer worstation::
|
||||||
|
|
||||||
|
Disk Usage: 65 GB
|
||||||
|
Directories: 58971
|
||||||
|
Files: 726314
|
||||||
|
Files < 64KB: 617541
|
||||||
|
|
||||||
|
As you see, there are really many small files. If we would do file
|
||||||
|
level deduplication, i.e. generate one chunk per file, we end up with
|
||||||
|
more than 700000 chunks.
|
||||||
|
|
||||||
|
Instead, our current algorithm only produce large chunks with an
|
||||||
|
average chunks size of 4MB. With above data, this produce about 15000
|
||||||
|
chunks (factor 50 less chunks).
|
||||||
|
377
debian/changelog
vendored
@ -1,3 +1,376 @@
|
|||||||
|
rust-proxmox-backup (1.0.7-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix #3197: skip fingerprint check when restoring key
|
||||||
|
|
||||||
|
* client: add 'import-with-master-key' command
|
||||||
|
|
||||||
|
* fix #3192: correct sort in prune sim
|
||||||
|
|
||||||
|
* tools/daemon: improve reload behaviour
|
||||||
|
|
||||||
|
* http client: add timeouts for critical connects
|
||||||
|
|
||||||
|
* api: improve error messages for restricted endpoints
|
||||||
|
|
||||||
|
* api: allow tokens to list users
|
||||||
|
|
||||||
|
* ui: running tasks: Use gettext for column labels
|
||||||
|
|
||||||
|
* login: add two-factor authentication (TFA) and integrate in web-interface
|
||||||
|
|
||||||
|
* login: support webAuthn, recovery keys and TOTP as TFA methods
|
||||||
|
|
||||||
|
* make it possible to abort tasks with CTRL-C
|
||||||
|
|
||||||
|
* fix #3245: only use default schedule for new jobs
|
||||||
|
|
||||||
|
* manager CLI: user/token list: fix rendering 0 (never) expire date
|
||||||
|
|
||||||
|
* update the event-driven, non-blocking I/O tokio platform to 1.0
|
||||||
|
|
||||||
|
* access: limit editing all pam credentials to superuser
|
||||||
|
|
||||||
|
* access: restrict password changes on @pam realm to superuser
|
||||||
|
|
||||||
|
* patch out wrongly linked libraries from ELFs to avoid extra, bogus
|
||||||
|
dependencies in resulting package
|
||||||
|
|
||||||
|
* add "password hint" to encryption key config
|
||||||
|
|
||||||
|
* improve GC error handling
|
||||||
|
|
||||||
|
* cli: make it possible to abort tasks with CTRL-C
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Feb 2021 10:34:23 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.6-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* stricter handling of file-descriptors, fixes some cases where some could
|
||||||
|
leak
|
||||||
|
|
||||||
|
* ui: fix various usages of the findRecord method, ensuring it matches exactly
|
||||||
|
|
||||||
|
* garbage collection: improve task log format
|
||||||
|
|
||||||
|
* verification: improve progress log, make it similar to what's logged on
|
||||||
|
pull (sync)
|
||||||
|
|
||||||
|
* datastore: move manifest locking to /run. This avoids issues with
|
||||||
|
filesystems which cannot natively handle removing in-use files ("delete on
|
||||||
|
last close"), and create a virtual, internal, replacement file to work
|
||||||
|
around that. This is done, for example, by NFS or CIFS (samba).
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 11 Dec 2020 12:51:33 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.5-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* client: restore: print meta information exclusively to standard error
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 15:29:58 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.4-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fingerprint: add bytes() accessor
|
||||||
|
|
||||||
|
* ui: fix broken gettext use
|
||||||
|
|
||||||
|
* cli: move more commands into "snapshot" sub-command
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 25 Nov 2020 06:37:41 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.3-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* client: inform user when automatically using the default encryption key
|
||||||
|
|
||||||
|
* ui: UserView: render name as 'Firstname Lastname'
|
||||||
|
|
||||||
|
* proxmox-backup-manager: add versions command
|
||||||
|
|
||||||
|
* pxar: fix anchored exclusion at archive root
|
||||||
|
|
||||||
|
* pxar: include .pxarexclude files in the archive
|
||||||
|
|
||||||
|
* client: expose all-file-systems option
|
||||||
|
|
||||||
|
* api: make expensive parts of datastore status opt-in
|
||||||
|
|
||||||
|
* api: include datastore ID in invalid owner errors
|
||||||
|
|
||||||
|
* garbage collection: treat .bad files like regular chunks to ensure they
|
||||||
|
are removed if not referenced anymore
|
||||||
|
|
||||||
|
* client: fix issues with encoded UPID strings
|
||||||
|
|
||||||
|
* encryption: add fingerprint to key config
|
||||||
|
|
||||||
|
* client: add 'key show' command
|
||||||
|
|
||||||
|
* fix #3139: add key fingerprint to backup snapshot manifest and check it
|
||||||
|
when loading with a key
|
||||||
|
|
||||||
|
* ui: add snapshot/file fingerprint tooltip
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Nov 2020 08:55:47 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.1-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* ui: datastore summary: drop 'removed bytes' display
|
||||||
|
|
||||||
|
* ui: datastore add: set default schedule
|
||||||
|
|
||||||
|
* prune sim: make backup schedule a form, bind update button to its validity
|
||||||
|
|
||||||
|
* daemon: add workaround for race in reloading and systemd 'ready' notification
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 11 Nov 2020 10:18:12 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix #3121: forbid removing used remotes
|
||||||
|
|
||||||
|
* docs: backup-client: encryption: discuss paperkey command
|
||||||
|
|
||||||
|
* pxar: log when skipping mount points
|
||||||
|
|
||||||
|
* ui: show also parent ACLs which affect a datastore in its panel
|
||||||
|
|
||||||
|
* api: node/apt: add versions call
|
||||||
|
|
||||||
|
* ui: make Datastore a selectable panel again. Show a datastore summary
|
||||||
|
list, and provide unfiltered access to all sync and verify jobs.
|
||||||
|
|
||||||
|
* ui: add help tool-button to various panels
|
||||||
|
|
||||||
|
* ui: set various onlineHelp buttons
|
||||||
|
|
||||||
|
* zfs: mount new zpools created via API under /mnt/datastore/<id>
|
||||||
|
|
||||||
|
* ui: move disks/directory views to its own tab panel
|
||||||
|
|
||||||
|
* fix #3060: continue sync if we cannot acquire the group lock
|
||||||
|
|
||||||
|
* HttpsConnector: include destination on connect errors
|
||||||
|
|
||||||
|
* fix #3060: improve get_owner error handling
|
||||||
|
|
||||||
|
* remote.cfg: rename userid to 'auth-id'
|
||||||
|
|
||||||
|
* verify: log/warn on invalid owner
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Nov 2020 14:36:13 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.7-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* ui: add remote store selector
|
||||||
|
|
||||||
|
* tools/daemon: fix reload with open connections
|
||||||
|
|
||||||
|
* pxar/create: fix endless loop for shrinking files
|
||||||
|
|
||||||
|
* pxar/create: handle ErrorKind::Interrupted for file reads
|
||||||
|
|
||||||
|
* ui: add action-button for changing backup group owner
|
||||||
|
|
||||||
|
* docs: add interactive prune simulator
|
||||||
|
|
||||||
|
* verify: fix unprivileged verification jobs
|
||||||
|
|
||||||
|
* tasks: allow access to job tasks
|
||||||
|
|
||||||
|
* drop internal 'backup@pam' owner, sync jobs need to set a explicit owner
|
||||||
|
|
||||||
|
* api: datastore: allow to set "verify-new" option over API
|
||||||
|
|
||||||
|
* ui: datastore: add Options tab, allowing one to change per-datastore
|
||||||
|
notification and verify-new options
|
||||||
|
|
||||||
|
* docs: scroll navigation bar to current active section
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Nov 2020 07:36:58 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.6-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix #3106: improve queueing new incoming connections
|
||||||
|
|
||||||
|
* fix #2870: sync: ensure a updated ticket is used, if available
|
||||||
|
|
||||||
|
* proxy: log if there are too many open connections
|
||||||
|
|
||||||
|
* ui: SyncJobEdit: fix sending 'delete' values on SyncJob creation
|
||||||
|
|
||||||
|
* datastore config: allow to configure who receives job notify emails
|
||||||
|
|
||||||
|
* ui: fix task description for log rotate
|
||||||
|
|
||||||
|
* proxy: also rotate auth.log file
|
||||||
|
|
||||||
|
* ui: add shell panel under administration
|
||||||
|
|
||||||
|
* ui: sync jobs: only set default schedule when creating new jobs and some
|
||||||
|
other small fixes
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 04 Nov 2020 19:12:57 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.5-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* ui: user menu: allow one to change the language while staying logged in
|
||||||
|
|
||||||
|
* proxmox-backup-manager: add subscription commands
|
||||||
|
|
||||||
|
* server/rest: also accept = as token separator
|
||||||
|
|
||||||
|
* privs: allow reading snapshot notes with Datastore.Audit
|
||||||
|
|
||||||
|
* privs: enforce Datastore.Modify|Backup to set backup notes
|
||||||
|
|
||||||
|
* verify: introduce and use new Datastore.Verify privilege
|
||||||
|
|
||||||
|
* docs: add API tokens to documentation
|
||||||
|
|
||||||
|
* ui: various smaller layout and icon improvements
|
||||||
|
|
||||||
|
* api: implement apt pkg cache for caching pending updates
|
||||||
|
|
||||||
|
* api: apt: implement support to send notification email on new updates
|
||||||
|
|
||||||
|
* add daily update and maintenance task
|
||||||
|
|
||||||
|
* fix #2864: add owner option to sync
|
||||||
|
|
||||||
|
* sync: allow sync for non-superusers under special conditions
|
||||||
|
|
||||||
|
* config: support deprecated netmask when parsing interfaces file
|
||||||
|
|
||||||
|
* server: implement access log rotation with re-open via command socket
|
||||||
|
|
||||||
|
* garbage collect: improve index error messages
|
||||||
|
|
||||||
|
* fix #3039: use the same ID regex for info and api
|
||||||
|
|
||||||
|
* ui: administration: allow extensive filtering of the worker task
|
||||||
|
|
||||||
|
* report: add api endpoint and function to generate report
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 03 Nov 2020 17:41:17 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.4-2) unstable; urgency=medium
|
||||||
|
|
||||||
|
* make postinst (update) script more resilient
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 20:09:30 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.4-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* implement API-token
|
||||||
|
|
||||||
|
* client/remote: allow using API-token + secret
|
||||||
|
|
||||||
|
* ui/cli: implement API-token management interface and commands
|
||||||
|
|
||||||
|
* ui: add widget to view the effective permissions of a user or token
|
||||||
|
|
||||||
|
* ui: datastore summary: handle error when having zero snapshots of any type
|
||||||
|
|
||||||
|
* ui: move user, token and permissions into an access control tab panel
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 17:19:13 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.3-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix #2998: encode mtime as i64 instead of u64
|
||||||
|
|
||||||
|
* GC: log the number of leftover bad chunks we could not yet cleanup, as no
|
||||||
|
valid one replaced them. Also log deduplication factor.
|
||||||
|
|
||||||
|
* send sync job status emails
|
||||||
|
|
||||||
|
* api: datastore status: introduce proper structs and restore compatibility
|
||||||
|
to 0.9.1
|
||||||
|
|
||||||
|
* ui: drop id field from verify/sync add window, they are now seen as internal
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Oct 2020 14:58:13 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (0.9.2-2) unstable; urgency=medium
|
||||||
|
|
||||||
|
* rework server web-interface, move more datastore related panels as tabs
|
||||||
|
inside the datastore view
|
||||||
|
|
||||||
|
* prune: never fail, just warn about failed removals
|
||||||
|
|
||||||
|
* prune/forget: skip snapshots with open readers (restore, verification)
|
||||||
|
|
||||||
|
* datastore: always ensure to remove individual snapshots before their group
|
||||||
|
|
||||||
|
* pxar: fix relative '!' rules in .pxarexclude
|
||||||
|
|
||||||
|
* pxar: anchor pxarexcludes starting with a slash
|
||||||
|
|
||||||
|
* GC: mark phase: ignore vanished index files
|
||||||
|
|
||||||
|
* server/rest: forward real client IP on proxied request and log it in
|
||||||
|
failed authentication requests
|
||||||
|
|
||||||
|
* server: rest: implement max URI path and query length request limits
|
||||||
|
|
||||||
|
* server/rest: implement request access log and log the query part of
|
||||||
|
URL and the user agent
|
||||||
|
|
||||||
|
* api: access: log to separate file, use syslog to errors only to reduce
|
||||||
|
syslog spam
|
||||||
|
|
||||||
|
* client: set HTTP connect timeout to 10 seconds
|
||||||
|
|
||||||
|
* client: send TCP keep-alive after 2 minutes instead of the Linux default
|
||||||
|
of two hours.
|
||||||
|
|
||||||
|
* CLI completion: fix ACL path completion
|
||||||
|
|
||||||
|
* fix #2988: allow one to enable automatic verification after finishing a
|
||||||
|
snapshot, can be controlled as a per-datastore option
|
||||||
|
|
||||||
|
* various log-rotation improvements
|
||||||
|
|
||||||
|
* proxmox-backup-client: use HumanByte to render snapshot size
|
||||||
|
|
||||||
|
* paperkey: use svg as image format to provide better scalability
|
||||||
|
|
||||||
|
* backup: avoid Transport endpoint is not connected error
|
||||||
|
|
||||||
|
* fix #3038: check user before renewing ticket
|
||||||
|
|
||||||
|
* ui/tools: add zip module and allow to download an archive directory as a zip
|
||||||
|
|
||||||
|
* ui and api: add verification job config, allowing to schedule more
|
||||||
|
flexible jobs, filtering out already and/or recently verified snapshots
|
||||||
|
NOTE: the previous simple "verify all" schedule was dropped from the
|
||||||
|
datastore content, and does *not* get migrated to the new job config.
|
||||||
|
|
||||||
|
* tasks: use systemd escape to decode/encode the task worker ID, avoiding
|
||||||
|
some display problems with problematic characters
|
||||||
|
|
||||||
|
* fix #2934: list also new to-be-installed packages in updates
|
||||||
|
|
||||||
|
* apt: add /changelog API call similar to PVE
|
||||||
|
|
||||||
|
* api: add world accessible ping dummy endpoint, to cheaply check for a
|
||||||
|
running PBS instance.
|
||||||
|
|
||||||
|
* ui: add datastore summary panel and move Statistics into it
|
||||||
|
|
||||||
|
* ui: navigation: add 'Add Datastore' button below datastore list
|
||||||
|
|
||||||
|
* ui: datastore panel: save and restore selected tab statefully
|
||||||
|
|
||||||
|
* send notification mails to email of root@pam account for GC and verify
|
||||||
|
jobs
|
||||||
|
|
||||||
|
* ui: datastore: use simple V. for verify action button
|
||||||
|
|
||||||
|
* ui: datastore: show snapshot manifest comment and allow to edit them
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Oct 2020 23:05:41 +0100
|
||||||
|
|
||||||
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
|
rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
|
||||||
|
|
||||||
* TLS speedups (use SslAcceptor::mozilla_intermediate_v5)
|
* TLS speedups (use SslAcceptor::mozilla_intermediate_v5)
|
||||||
@ -16,7 +389,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
|
|||||||
|
|
||||||
* add "Build" section to README.rst
|
* add "Build" section to README.rst
|
||||||
|
|
||||||
* reader: actually allow users to downlod their own backups
|
* reader: actually allow users to download their own backups
|
||||||
|
|
||||||
* reader: track index chunks and limit access
|
* reader: track index chunks and limit access
|
||||||
|
|
||||||
@ -38,7 +411,7 @@ rust-proxmox-backup (0.9.1-1) unstable; urgency=medium
|
|||||||
|
|
||||||
* ui: Dashboard/TaskSummary: add Verifies to the Summary
|
* ui: Dashboard/TaskSummary: add Verifies to the Summary
|
||||||
|
|
||||||
* ui: implment task history limit and make it configurable
|
* ui: implement task history limit and make it configurable
|
||||||
|
|
||||||
* docs: installation: add system requirements section
|
* docs: installation: add system requirements section
|
||||||
|
|
||||||
|
91
debian/control
vendored
@ -7,24 +7,25 @@ Build-Depends: debhelper (>= 11),
|
|||||||
rustc:native,
|
rustc:native,
|
||||||
libstd-rust-dev,
|
libstd-rust-dev,
|
||||||
librust-anyhow-1+default-dev,
|
librust-anyhow-1+default-dev,
|
||||||
librust-apt-pkg-native-0.3+default-dev (>= 0.3.1-~~),
|
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
|
||||||
librust-base64-0.12+default-dev,
|
librust-base64-0.12+default-dev,
|
||||||
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
librust-bitflags-1+default-dev (>= 1.2.1-~~),
|
||||||
librust-bytes-0.5+default-dev,
|
librust-bytes-1+default-dev,
|
||||||
librust-crc32fast-1+default-dev,
|
librust-crc32fast-1+default-dev,
|
||||||
librust-crossbeam-channel-0.4+default-dev,
|
librust-crossbeam-channel-0.5+default-dev,
|
||||||
librust-endian-trait-0.6+arrays-dev,
|
librust-endian-trait-0.6+arrays-dev,
|
||||||
librust-endian-trait-0.6+default-dev,
|
librust-endian-trait-0.6+default-dev,
|
||||||
librust-futures-0.3+default-dev,
|
librust-futures-0.3+default-dev,
|
||||||
librust-h2-0.2+default-dev,
|
librust-h2-0.3+default-dev,
|
||||||
librust-h2-0.2+stream-dev,
|
librust-h2-0.3+stream-dev,
|
||||||
librust-handlebars-3+default-dev,
|
librust-handlebars-3+default-dev,
|
||||||
librust-http-0.2+default-dev,
|
librust-http-0.2+default-dev,
|
||||||
librust-hyper-0.13+default-dev (>= 0.13.6-~~),
|
librust-hyper-0.14+default-dev,
|
||||||
|
librust-hyper-0.14+full-dev,
|
||||||
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
librust-lazy-static-1+default-dev (>= 1.4-~~),
|
||||||
librust-libc-0.2+default-dev,
|
librust-libc-0.2+default-dev,
|
||||||
librust-log-0.4+default-dev,
|
librust-log-0.4+default-dev,
|
||||||
librust-nix-0.16+default-dev,
|
librust-nix-0.19+default-dev (>= 0.19.1-~~),
|
||||||
librust-nom-5+default-dev (>= 5.1-~~),
|
librust-nom-5+default-dev (>= 5.1-~~),
|
||||||
librust-num-traits-0.2+default-dev,
|
librust-num-traits-0.2+default-dev,
|
||||||
librust-once-cell-1+default-dev (>= 1.3.1-~~),
|
librust-once-cell-1+default-dev (>= 1.3.1-~~),
|
||||||
@ -33,42 +34,42 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-pam-sys-0.5+default-dev,
|
librust-pam-sys-0.5+default-dev,
|
||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
|
librust-pin-project-1+default-dev,
|
||||||
librust-pin-utils-0.1+default-dev,
|
librust-pin-utils-0.1+default-dev,
|
||||||
librust-proxmox-0.4+api-macro-dev (>= 0.4.3-~~),
|
librust-proxmox-0.10+api-macro-dev (>= 0.10.1-~~),
|
||||||
librust-proxmox-0.4+default-dev (>= 0.4.3-~~),
|
librust-proxmox-0.10+default-dev (>= 0.10.1-~~),
|
||||||
librust-proxmox-0.4+sortable-macro-dev (>= 0.4.3-~~),
|
librust-proxmox-0.10+sortable-macro-dev (>= 0.10.1-~~),
|
||||||
librust-proxmox-0.4+websocket-dev (>= 0.4.3-~~),
|
librust-proxmox-0.10+websocket-dev (>= 0.10.1-~~),
|
||||||
librust-proxmox-fuse-0.1+default-dev,
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-pxar-0.6+default-dev (>= 0.6.1-~~),
|
librust-pxar-0.8+default-dev,
|
||||||
librust-pxar-0.6+futures-io-dev (>= 0.6.1-~~),
|
librust-pxar-0.8+tokio-io-dev,
|
||||||
librust-pxar-0.6+tokio-io-dev (>= 0.6.1-~~),
|
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.2-~~),
|
||||||
librust-rustyline-6+default-dev,
|
librust-rustyline-7+default-dev,
|
||||||
librust-serde-1+default-dev,
|
librust-serde-1+default-dev,
|
||||||
librust-serde-1+derive-dev,
|
librust-serde-1+derive-dev,
|
||||||
librust-serde-json-1+default-dev,
|
librust-serde-json-1+default-dev,
|
||||||
librust-siphasher-0.3+default-dev,
|
librust-siphasher-0.3+default-dev,
|
||||||
librust-syslog-4+default-dev,
|
librust-syslog-4+default-dev,
|
||||||
librust-tokio-0.2+blocking-dev (>= 0.2.9-~~),
|
librust-tokio-1+default-dev,
|
||||||
librust-tokio-0.2+default-dev (>= 0.2.9-~~),
|
librust-tokio-1+fs-dev,
|
||||||
librust-tokio-0.2+dns-dev (>= 0.2.9-~~),
|
librust-tokio-1+io-util-dev,
|
||||||
librust-tokio-0.2+fs-dev (>= 0.2.9-~~),
|
librust-tokio-1+macros-dev,
|
||||||
librust-tokio-0.2+io-util-dev (>= 0.2.9-~~),
|
librust-tokio-1+net-dev,
|
||||||
librust-tokio-0.2+macros-dev (>= 0.2.9-~~),
|
librust-tokio-1+parking-lot-dev,
|
||||||
librust-tokio-0.2+process-dev (>= 0.2.9-~~),
|
librust-tokio-1+process-dev,
|
||||||
librust-tokio-0.2+rt-threaded-dev (>= 0.2.9-~~),
|
librust-tokio-1+rt-dev,
|
||||||
librust-tokio-0.2+signal-dev (>= 0.2.9-~~),
|
librust-tokio-1+rt-multi-thread-dev,
|
||||||
librust-tokio-0.2+stream-dev (>= 0.2.9-~~),
|
librust-tokio-1+signal-dev,
|
||||||
librust-tokio-0.2+tcp-dev (>= 0.2.9-~~),
|
librust-tokio-1+time-dev,
|
||||||
librust-tokio-0.2+time-dev (>= 0.2.9-~~),
|
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
||||||
librust-tokio-0.2+uds-dev (>= 0.2.9-~~),
|
librust-tokio-stream-0.1+default-dev,
|
||||||
librust-tokio-openssl-0.4+default-dev,
|
librust-tokio-util-0.6+codec-dev,
|
||||||
librust-tokio-util-0.3+codec-dev,
|
librust-tokio-util-0.6+default-dev,
|
||||||
librust-tokio-util-0.3+default-dev,
|
|
||||||
librust-tower-service-0.3+default-dev,
|
librust-tower-service-0.3+default-dev,
|
||||||
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
||||||
librust-url-2+default-dev (>= 2.1-~~),
|
librust-url-2+default-dev (>= 2.1-~~),
|
||||||
librust-walkdir-2+default-dev,
|
librust-walkdir-2+default-dev,
|
||||||
|
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
||||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||||
librust-zstd-0.4+bindgen-dev,
|
librust-zstd-0.4+bindgen-dev,
|
||||||
librust-zstd-0.4+default-dev,
|
librust-zstd-0.4+default-dev,
|
||||||
@ -76,43 +77,53 @@ Build-Depends: debhelper (>= 11),
|
|||||||
libfuse3-dev,
|
libfuse3-dev,
|
||||||
libsystemd-dev,
|
libsystemd-dev,
|
||||||
uuid-dev,
|
uuid-dev,
|
||||||
debhelper (>= 12~),
|
libsgutils2-dev,
|
||||||
bash-completion,
|
bash-completion,
|
||||||
pve-eslint,
|
debhelper (>= 12~),
|
||||||
python3-docutils,
|
|
||||||
python3-pygments,
|
|
||||||
rsync,
|
|
||||||
fonts-dejavu-core <!nodoc>,
|
fonts-dejavu-core <!nodoc>,
|
||||||
fonts-lato <!nodoc>,
|
fonts-lato <!nodoc>,
|
||||||
fonts-open-sans <!nodoc>,
|
fonts-open-sans <!nodoc>,
|
||||||
graphviz <!nodoc>,
|
graphviz <!nodoc>,
|
||||||
latexmk <!nodoc>,
|
latexmk <!nodoc>,
|
||||||
|
patchelf,
|
||||||
|
pve-eslint (>= 7.18.0-1),
|
||||||
|
python3-docutils,
|
||||||
|
python3-pygments,
|
||||||
python3-sphinx <!nodoc>,
|
python3-sphinx <!nodoc>,
|
||||||
|
rsync,
|
||||||
texlive-fonts-extra <!nodoc>,
|
texlive-fonts-extra <!nodoc>,
|
||||||
texlive-fonts-recommended <!nodoc>,
|
texlive-fonts-recommended <!nodoc>,
|
||||||
texlive-xetex <!nodoc>,
|
texlive-xetex <!nodoc>,
|
||||||
xindy <!nodoc>
|
xindy <!nodoc>
|
||||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||||
Standards-Version: 4.4.1
|
Standards-Version: 4.4.1
|
||||||
Vcs-Git:
|
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
|
||||||
Vcs-Browser:
|
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
|
||||||
Homepage: https://www.proxmox.com
|
Homepage: https://www.proxmox.com
|
||||||
|
|
||||||
Package: proxmox-backup-server
|
Package: proxmox-backup-server
|
||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: fonts-font-awesome,
|
Depends: fonts-font-awesome,
|
||||||
libjs-extjs (>= 6.0.1),
|
libjs-extjs (>= 6.0.1),
|
||||||
|
libjs-qrcodejs (>= 1.20201119),
|
||||||
|
libsgutils2-2,
|
||||||
libzstd1 (>= 1.3.8),
|
libzstd1 (>= 1.3.8),
|
||||||
lvm2,
|
lvm2,
|
||||||
|
mt-st,
|
||||||
|
mtx,
|
||||||
|
openssh-server,
|
||||||
pbs-i18n,
|
pbs-i18n,
|
||||||
|
postfix | mail-transport-agent,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.3-1),
|
proxmox-widget-toolkit (>= 2.3-6),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
|
sg3-utils,
|
||||||
smartmontools,
|
smartmontools,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
${shlibs:Depends},
|
${shlibs:Depends},
|
||||||
Recommends: zfsutils-linux,
|
Recommends: zfsutils-linux,
|
||||||
|
ifupdown2,
|
||||||
Description: Proxmox Backup Server daemon with tools and GUI
|
Description: Proxmox Backup Server daemon with tools and GUI
|
||||||
This package contains the Proxmox Backup Server daemons and related
|
This package contains the Proxmox Backup Server daemons and related
|
||||||
tools. This includes a web-based graphical user interface.
|
tools. This includes a web-based graphical user interface.
|
||||||
|
10
debian/control.in
vendored
@ -2,17 +2,25 @@ Package: proxmox-backup-server
|
|||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: fonts-font-awesome,
|
Depends: fonts-font-awesome,
|
||||||
libjs-extjs (>= 6.0.1),
|
libjs-extjs (>= 6.0.1),
|
||||||
|
libjs-qrcodejs (>= 1.20201119),
|
||||||
|
libsgutils2-2,
|
||||||
libzstd1 (>= 1.3.8),
|
libzstd1 (>= 1.3.8),
|
||||||
lvm2,
|
lvm2,
|
||||||
|
mt-st,
|
||||||
|
mtx,
|
||||||
|
openssh-server,
|
||||||
pbs-i18n,
|
pbs-i18n,
|
||||||
|
postfix | mail-transport-agent,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.3-1),
|
proxmox-widget-toolkit (>= 2.3-6),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
|
sg3-utils,
|
||||||
smartmontools,
|
smartmontools,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
${shlibs:Depends},
|
${shlibs:Depends},
|
||||||
Recommends: zfsutils-linux,
|
Recommends: zfsutils-linux,
|
||||||
|
ifupdown2,
|
||||||
Description: Proxmox Backup Server daemon with tools and GUI
|
Description: Proxmox Backup Server daemon with tools and GUI
|
||||||
This package contains the Proxmox Backup Server daemons and related
|
This package contains the Proxmox Backup Server daemons and related
|
||||||
tools. This includes a web-based graphical user interface.
|
tools. This includes a web-based graphical user interface.
|
||||||
|
22
debian/debcargo.toml
vendored
@ -2,33 +2,32 @@ overlay = "."
|
|||||||
crate_src_path = ".."
|
crate_src_path = ".."
|
||||||
whitelist = ["tests/*.c"]
|
whitelist = ["tests/*.c"]
|
||||||
|
|
||||||
# needed for pinutils alpha
|
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
||||||
allow_prerelease_deps = true
|
|
||||||
|
|
||||||
[source]
|
[source]
|
||||||
# TODO: update once public
|
vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
|
||||||
vcs_git = ""
|
vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
|
||||||
vcs_browser = ""
|
|
||||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
|
||||||
section = "admin"
|
section = "admin"
|
||||||
build_depends = [
|
build_depends = [
|
||||||
"debhelper (>= 12~)",
|
|
||||||
"bash-completion",
|
"bash-completion",
|
||||||
"pve-eslint",
|
"debhelper (>= 12~)",
|
||||||
"python3-docutils",
|
|
||||||
"python3-pygments",
|
|
||||||
"rsync",
|
|
||||||
"fonts-dejavu-core <!nodoc>",
|
"fonts-dejavu-core <!nodoc>",
|
||||||
"fonts-lato <!nodoc>",
|
"fonts-lato <!nodoc>",
|
||||||
"fonts-open-sans <!nodoc>",
|
"fonts-open-sans <!nodoc>",
|
||||||
"graphviz <!nodoc>",
|
"graphviz <!nodoc>",
|
||||||
"latexmk <!nodoc>",
|
"latexmk <!nodoc>",
|
||||||
|
"patchelf",
|
||||||
|
"pve-eslint (>= 7.18.0-1)",
|
||||||
|
"python3-docutils",
|
||||||
|
"python3-pygments",
|
||||||
"python3-sphinx <!nodoc>",
|
"python3-sphinx <!nodoc>",
|
||||||
|
"rsync",
|
||||||
"texlive-fonts-extra <!nodoc>",
|
"texlive-fonts-extra <!nodoc>",
|
||||||
"texlive-fonts-recommended <!nodoc>",
|
"texlive-fonts-recommended <!nodoc>",
|
||||||
"texlive-xetex <!nodoc>",
|
"texlive-xetex <!nodoc>",
|
||||||
"xindy <!nodoc>",
|
"xindy <!nodoc>",
|
||||||
]
|
]
|
||||||
|
|
||||||
build_depends_excludes = [
|
build_depends_excludes = [
|
||||||
"debhelper (>=11)",
|
"debhelper (>=11)",
|
||||||
]
|
]
|
||||||
@ -39,4 +38,5 @@ depends = [
|
|||||||
"libfuse3-dev",
|
"libfuse3-dev",
|
||||||
"libsystemd-dev",
|
"libsystemd-dev",
|
||||||
"uuid-dev",
|
"uuid-dev",
|
||||||
|
"libsgutils2-dev",
|
||||||
]
|
]
|
||||||
|
2
debian/lintian-overrides
vendored
@ -1,2 +1,2 @@
|
|||||||
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbstest-beta.list
|
proxmox-backup-server: package-installs-apt-sources etc/apt/sources.list.d/pbs-enterprise.list
|
||||||
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
|
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/proxmox-backup-banner.service getty.target
|
||||||
|
3
debian/pmtx.bc
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# pmtx bash completion
|
||||||
|
|
||||||
|
complete -C 'pmtx bashcomplete' pmtx
|
28
debian/postinst
vendored
@ -6,6 +6,9 @@ set -e
|
|||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
configure)
|
configure)
|
||||||
|
# need to have user backup in the tape group
|
||||||
|
usermod -a -G tape backup
|
||||||
|
|
||||||
# modeled after dh_systemd_start output
|
# modeled after dh_systemd_start output
|
||||||
systemctl --system daemon-reload >/dev/null || true
|
systemctl --system daemon-reload >/dev/null || true
|
||||||
if [ -n "$2" ]; then
|
if [ -n "$2" ]; then
|
||||||
@ -15,10 +18,33 @@ case "$1" in
|
|||||||
fi
|
fi
|
||||||
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
||||||
|
|
||||||
|
# FIXME: Remove with 1.1
|
||||||
|
if test -n "$2"; then
|
||||||
|
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
|
||||||
|
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
|
||||||
|
echo "NOTE: drop all verify schedules from datastore config."
|
||||||
|
echo "You can now add more flexible verify jobs"
|
||||||
|
flock -w 30 /etc/proxmox-backup/.datastore.lck \
|
||||||
|
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
|
||||||
|
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
|
||||||
|
fi
|
||||||
|
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
|
||||||
|
if [ -e /etc/proxmox-backup/remote.cfg ]; then
|
||||||
|
echo "NOTE: Switching over remote.cfg to new field names.."
|
||||||
|
flock -w 30 /etc/proxmox-backup/.remote.lck \
|
||||||
|
sed -i \
|
||||||
|
-e 's/^\s\+userid /\tauth-id /g' \
|
||||||
|
/etc/proxmox-backup/remote.cfg || true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
||||||
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
||||||
echo "Fixing up termproxy user id in task log..."
|
echo "Fixing up termproxy user id in task log..."
|
||||||
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active
|
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
|
||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
|
|
||||||
|
3
debian/prerm
vendored
@ -6,5 +6,6 @@ set -e
|
|||||||
|
|
||||||
# modeled after dh_systemd_start output
|
# modeled after dh_systemd_start output
|
||||||
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
|
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
|
||||||
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' 'proxmox-backup.service' >/dev/null || true
|
deb-systemd-invoke stop 'proxmox-backup-banner.service' 'proxmox-backup-proxy.service' \
|
||||||
|
'proxmox-backup.service' 'proxmox-backup-daily-update.timer' >/dev/null || true
|
||||||
fi
|
fi
|
||||||
|
2
debian/proxmox-backup-docs.links
vendored
@ -1 +1,3 @@
|
|||||||
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
||||||
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
||||||
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
||||||
|
1
debian/proxmox-backup-server.bash-completion
vendored
@ -1 +1,2 @@
|
|||||||
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
||||||
|
debian/pmtx.bc pmtx
|
||||||
|
12
debian/proxmox-backup-server.install
vendored
@ -1,16 +1,22 @@
|
|||||||
etc/proxmox-backup-proxy.service /lib/systemd/system/
|
etc/proxmox-backup-proxy.service /lib/systemd/system/
|
||||||
etc/proxmox-backup.service /lib/systemd/system/
|
etc/proxmox-backup.service /lib/systemd/system/
|
||||||
etc/proxmox-backup-banner.service /lib/systemd/system/
|
etc/proxmox-backup-banner.service /lib/systemd/system/
|
||||||
etc/pbstest-beta.list /etc/apt/sources.list.d/
|
etc/proxmox-backup-daily-update.service /lib/systemd/system/
|
||||||
|
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
|
||||||
|
etc/pbs-enterprise.list /etc/apt/sources.list.d/
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
||||||
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
||||||
|
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||||
usr/sbin/proxmox-backup-manager
|
usr/sbin/proxmox-backup-manager
|
||||||
|
usr/bin/pmtx
|
||||||
usr/share/javascript/proxmox-backup/index.hbs
|
usr/share/javascript/proxmox-backup/index.hbs
|
||||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||||
usr/share/javascript/proxmox-backup/images/logo-128.png
|
usr/share/javascript/proxmox-backup/images
|
||||||
usr/share/javascript/proxmox-backup/images/proxmox_logo.png
|
|
||||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||||
usr/share/man/man1/proxmox-backup-manager.1
|
usr/share/man/man1/proxmox-backup-manager.1
|
||||||
usr/share/man/man1/proxmox-backup-proxy.1
|
usr/share/man/man1/proxmox-backup-proxy.1
|
||||||
|
usr/share/man/man1/pmtx.1
|
||||||
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
||||||
|
usr/share/zsh/vendor-completions/_pmtx
|
||||||
|
1
debian/proxmox-backup-server.maintscript
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
rm_conffile /etc/apt/sources.list.d/pbstest-beta.list 1.0.0~ proxmox-backup-server
|
11
debian/rules
vendored
@ -38,13 +38,24 @@ override_dh_auto_install:
|
|||||||
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
|
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
|
||||||
|
|
||||||
override_dh_installsystemd:
|
override_dh_installsystemd:
|
||||||
|
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
|
||||||
# note: we start/try-reload-restart services manually in postinst
|
# note: we start/try-reload-restart services manually in postinst
|
||||||
dh_installsystemd --no-start --no-restart-after-upgrade
|
dh_installsystemd --no-start --no-restart-after-upgrade
|
||||||
|
|
||||||
|
override_dh_fixperms:
|
||||||
|
dh_fixperms --exclude sg-tape-cmd
|
||||||
|
|
||||||
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
|
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
|
||||||
# TODO: remove once available (Debian 11 ?)
|
# TODO: remove once available (Debian 11 ?)
|
||||||
override_dh_dwz:
|
override_dh_dwz:
|
||||||
dh_dwz --no-dwz-multifile
|
dh_dwz --no-dwz-multifile
|
||||||
|
|
||||||
|
override_dh_strip:
|
||||||
|
dh_strip
|
||||||
|
for exe in $$(find debian/proxmox-backup-client/usr \
|
||||||
|
debian/proxmox-backup-server/usr -executable -type f); do \
|
||||||
|
debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
|
||||||
|
done
|
||||||
|
|
||||||
override_dh_compress:
|
override_dh_compress:
|
||||||
dh_compress -X.pdf
|
dh_compress -X.pdf
|
||||||
|
20
debian/scripts/elf-strip-unused-dependencies.sh
vendored
Executable file
@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
binary=$1
|
||||||
|
|
||||||
|
exec 3< <(ldd -u "$binary" | grep -oP '[^/:]+$')
|
||||||
|
|
||||||
|
patchargs=""
|
||||||
|
dropped=""
|
||||||
|
while read -r dep; do
|
||||||
|
dropped="$dep $dropped"
|
||||||
|
patchargs="--remove-needed $dep $patchargs"
|
||||||
|
done <&3
|
||||||
|
exec 3<&-
|
||||||
|
|
||||||
|
if [[ $dropped == "" ]]; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "patchelf '$binary' - removing unused dependencies:\n $dropped"
|
||||||
|
patchelf $patchargs $binary
|
@ -5,15 +5,35 @@ GENERATED_SYNOPSIS := \
|
|||||||
proxmox-backup-client/catalog-shell-synopsis.rst \
|
proxmox-backup-client/catalog-shell-synopsis.rst \
|
||||||
proxmox-backup-manager/synopsis.rst \
|
proxmox-backup-manager/synopsis.rst \
|
||||||
pxar/synopsis.rst \
|
pxar/synopsis.rst \
|
||||||
|
pmtx/synopsis.rst \
|
||||||
backup-protocol-api.rst \
|
backup-protocol-api.rst \
|
||||||
reader-protocol-api.rst
|
reader-protocol-api.rst
|
||||||
|
|
||||||
MANUAL_PAGES := \
|
MANUAL_PAGES := \
|
||||||
pxar.1 \
|
pxar.1 \
|
||||||
|
pmtx.1 \
|
||||||
proxmox-backup-proxy.1 \
|
proxmox-backup-proxy.1 \
|
||||||
proxmox-backup-client.1 \
|
proxmox-backup-client.1 \
|
||||||
proxmox-backup-manager.1
|
proxmox-backup-manager.1
|
||||||
|
|
||||||
|
PRUNE_SIMULATOR_FILES := \
|
||||||
|
prune-simulator/index.html \
|
||||||
|
prune-simulator/documentation.html \
|
||||||
|
prune-simulator/clear-trigger.png \
|
||||||
|
prune-simulator/prune-simulator.js
|
||||||
|
|
||||||
|
LTO_BARCODE_FILES := \
|
||||||
|
lto-barcode/index.html \
|
||||||
|
lto-barcode/code39.js \
|
||||||
|
lto-barcode/prefix-field.js \
|
||||||
|
lto-barcode/label-style.js \
|
||||||
|
lto-barcode/tape-type.js \
|
||||||
|
lto-barcode/paper-size.js \
|
||||||
|
lto-barcode/page-layout.js \
|
||||||
|
lto-barcode/page-calibration.js \
|
||||||
|
lto-barcode/label-list.js \
|
||||||
|
lto-barcode/label-setup.js \
|
||||||
|
lto-barcode/lto-barcode.js
|
||||||
|
|
||||||
# Sphinx documentation setup
|
# Sphinx documentation setup
|
||||||
SPHINXOPTS =
|
SPHINXOPTS =
|
||||||
@ -49,6 +69,14 @@ pxar/synopsis.rst: ${COMPILEDIR}/pxar
|
|||||||
pxar.1: pxar/man1.rst pxar/description.rst pxar/synopsis.rst
|
pxar.1: pxar/man1.rst pxar/description.rst pxar/synopsis.rst
|
||||||
rst2man $< >$@
|
rst2man $< >$@
|
||||||
|
|
||||||
|
|
||||||
|
pmtx/synopsis.rst: ${COMPILEDIR}/pmtx
|
||||||
|
${COMPILEDIR}/pmtx printdoc > pmtx/synopsis.rst
|
||||||
|
|
||||||
|
pmtx.1: pmtx/man1.rst pmtx/description.rst pmtx/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
|
||||||
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
|
proxmox-backup-client/synopsis.rst: ${COMPILEDIR}/proxmox-backup-client
|
||||||
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
|
${COMPILEDIR}/proxmox-backup-client printdoc > proxmox-backup-client/synopsis.rst
|
||||||
|
|
||||||
@ -74,10 +102,13 @@ onlinehelpinfo:
|
|||||||
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
||||||
|
|
||||||
.PHONY: html
|
.PHONY: html
|
||||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py
|
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES}
|
||||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||||
cp images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
|
||||||
cp custom.css $(BUILDDIR)/html/_static/
|
install -dm 0755 $(BUILDDIR)/html/prune-simulator
|
||||||
|
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
|
||||||
|
install -dm 0755 $(BUILDDIR)/html/lto-barcode
|
||||||
|
install -m 0644 ${LTO_BARCODE_FILES} $(BUILDDIR)/html/lto-barcode
|
||||||
@echo
|
@echo
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ def scan_extjs_files(wwwdir="../www"): # a bit rough i know, but we can optimize
|
|||||||
js_files.append(os.path.join(root, filename))
|
js_files.append(os.path.join(root, filename))
|
||||||
for js_file in js_files:
|
for js_file in js_files:
|
||||||
fd = open(js_file).read()
|
fd = open(js_file).read()
|
||||||
allmatch = re.findall("onlineHelp:\s*[\'\"](.*?)[\'\"]", fd, re.M)
|
allmatch = re.findall("(?:onlineHelp:|get_help_tool\s*\()\s*[\'\"](.*?)[\'\"]", fd, re.M)
|
||||||
for match in allmatch:
|
for match in allmatch:
|
||||||
anchor = match
|
anchor = match
|
||||||
anchor = re.sub('_', '-', anchor) # normalize labels
|
anchor = re.sub('_', '-', anchor) # normalize labels
|
||||||
@ -73,7 +73,9 @@ class ReflabelMapper(Builder):
|
|||||||
'link': '/docs/index.html',
|
'link': '/docs/index.html',
|
||||||
'title': 'Proxmox Backup Server Documentation Index',
|
'title': 'Proxmox Backup Server Documentation Index',
|
||||||
}
|
}
|
||||||
self.env.used_anchors = scan_extjs_files()
|
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
|
||||||
|
# as well
|
||||||
|
#self.env.used_anchors = scan_extjs_files()
|
||||||
|
|
||||||
if not os.path.isdir(self.outdir):
|
if not os.path.isdir(self.outdir):
|
||||||
os.mkdir(self.outdir)
|
os.mkdir(self.outdir)
|
||||||
@ -93,6 +95,9 @@ class ReflabelMapper(Builder):
|
|||||||
logger.info('traversing section {}'.format(title.astext()))
|
logger.info('traversing section {}'.format(title.astext()))
|
||||||
ref_name = getattr(title, 'rawsource', title.astext())
|
ref_name = getattr(title, 'rawsource', title.astext())
|
||||||
|
|
||||||
|
if (ref_name[:7] == ':term:`'):
|
||||||
|
ref_name = ref_name[7:-1]
|
||||||
|
|
||||||
self.env.online_help[labelid] = {'link': '', 'title': ''}
|
self.env.online_help[labelid] = {'link': '', 'title': ''}
|
||||||
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
|
self.env.online_help[labelid]['link'] = "/docs/" + os.path.basename(filename_html) + "#{}".format(labelid)
|
||||||
self.env.online_help[labelid]['title'] = ref_name
|
self.env.online_help[labelid]['title'] = ref_name
|
||||||
@ -112,15 +117,18 @@ class ReflabelMapper(Builder):
|
|||||||
def validate_anchors(self):
|
def validate_anchors(self):
|
||||||
#pprint(self.env.online_help)
|
#pprint(self.env.online_help)
|
||||||
to_remove = []
|
to_remove = []
|
||||||
for anchor in self.env.used_anchors:
|
|
||||||
if anchor not in self.env.online_help:
|
# Disabled until we find a sensible way to scan proxmox-widget-toolkit
|
||||||
logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
|
# as well
|
||||||
for anchor in self.env.online_help:
|
#for anchor in self.env.used_anchors:
|
||||||
if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
|
# if anchor not in self.env.online_help:
|
||||||
logger.info("[*] anchor {} not used! deleting...".format(anchor))
|
# logger.info("[-] anchor {} is missing from onlinehelp!".format(anchor))
|
||||||
to_remove.append(anchor)
|
#for anchor in self.env.online_help:
|
||||||
for anchor in to_remove:
|
# if anchor not in self.env.used_anchors and anchor != 'pbs_documentation_index':
|
||||||
self.env.online_help.pop(anchor, None)
|
# logger.info("[*] anchor {} not used! deleting...".format(anchor))
|
||||||
|
# to_remove.append(anchor)
|
||||||
|
#for anchor in to_remove:
|
||||||
|
# self.env.online_help.pop(anchor, None)
|
||||||
return
|
return
|
||||||
|
|
||||||
def finish(self):
|
def finish(self):
|
||||||
|
@ -18,25 +18,25 @@ the default is the local host (``localhost``).
|
|||||||
You can specify a port if your backup server is only reachable on a different
|
You can specify a port if your backup server is only reachable on a different
|
||||||
port (e.g. with NAT and port forwarding).
|
port (e.g. with NAT and port forwarding).
|
||||||
|
|
||||||
Note that if the server is an IPv6 address, you have to write it with
|
Note that if the server is an IPv6 address, you have to write it with square
|
||||||
square brackets (e.g. [fe80::01]).
|
brackets (for example, `[fe80::01]`).
|
||||||
|
|
||||||
You can pass the repository with the ``--repository`` command
|
You can pass the repository with the ``--repository`` command line option, or
|
||||||
line option, or by setting the ``PBS_REPOSITORY`` environment
|
by setting the ``PBS_REPOSITORY`` environment variable.
|
||||||
variable.
|
|
||||||
|
|
||||||
Here some examples of valid repositories and the real values
|
Here some examples of valid repositories and the real values
|
||||||
|
|
||||||
================================ ============ ================== ===========
|
================================ ================== ================== ===========
|
||||||
Example User Host:Port Datastore
|
Example User Host:Port Datastore
|
||||||
================================ ============ ================== ===========
|
================================ ================== ================== ===========
|
||||||
mydatastore ``root@pam`` localhost:8007 mydatastore
|
mydatastore ``root@pam`` localhost:8007 mydatastore
|
||||||
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
|
myhostname:mydatastore ``root@pam`` myhostname:8007 mydatastore
|
||||||
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
|
user@pbs@myhostname:mydatastore ``user@pbs`` myhostname:8007 mydatastore
|
||||||
|
user\@pbs!token@host:store ``user@pbs!token`` myhostname:8007 mydatastore
|
||||||
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
|
192.168.55.55:1234:mydatastore ``root@pam`` 192.168.55.55:1234 mydatastore
|
||||||
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
|
[ff80::51]:mydatastore ``root@pam`` [ff80::51]:8007 mydatastore
|
||||||
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
||||||
================================ ============ ================== ===========
|
================================ ================== ================== ===========
|
||||||
|
|
||||||
Environment Variables
|
Environment Variables
|
||||||
---------------------
|
---------------------
|
||||||
@ -45,16 +45,16 @@ Environment Variables
|
|||||||
The default backup repository.
|
The default backup repository.
|
||||||
|
|
||||||
``PBS_PASSWORD``
|
``PBS_PASSWORD``
|
||||||
When set, this value is used for the password required for the
|
When set, this value is used for the password required for the backup server.
|
||||||
backup server.
|
You can also set this to a API token secret.
|
||||||
|
|
||||||
``PBS_ENCRYPTION_PASSWORD``
|
``PBS_ENCRYPTION_PASSWORD``
|
||||||
When set, this value is used to access the secret encryption key (if
|
When set, this value is used to access the secret encryption key (if
|
||||||
protected by password).
|
protected by password).
|
||||||
|
|
||||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||||
certificate (only used if the system CA certificates cannot
|
certificate (only used if the system CA certificates cannot validate the
|
||||||
validate the certificate).
|
certificate).
|
||||||
|
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
@ -246,6 +246,8 @@ Restoring this backup will result in:
|
|||||||
. .. file2
|
. .. file2
|
||||||
|
|
||||||
|
|
||||||
|
.. _encryption:
|
||||||
|
|
||||||
Encryption
|
Encryption
|
||||||
----------
|
----------
|
||||||
|
|
||||||
@ -351,8 +353,10 @@ To set up a master key:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out /path/to/target
|
# proxmox-backup-client key import-with-master-key /path/to/target --master-keyfile /path/to/master-private.pem --encrypted-keyfile /path/to/rsa-encrypted.key
|
||||||
Enter pass phrase for ./master-private.pem: *********
|
Master Key Password: ******
|
||||||
|
New Password: ******
|
||||||
|
Verify Password: ******
|
||||||
|
|
||||||
7. The target file will now contain the encryption key information in plain
|
7. The target file will now contain the encryption key information in plain
|
||||||
text. The success of this can be confirmed by passing the resulting ``json``
|
text. The success of this can be confirmed by passing the resulting ``json``
|
||||||
@ -363,9 +367,22 @@ To set up a master key:
|
|||||||
backed up. It can happen, for example, that you back up an entire system, using
|
backed up. It can happen, for example, that you back up an entire system, using
|
||||||
a key on that system. If the system then becomes inaccessible for any reason
|
a key on that system. If the system then becomes inaccessible for any reason
|
||||||
and needs to be restored, this will not be possible as the encryption key will be
|
and needs to be restored, this will not be possible as the encryption key will be
|
||||||
lost along with the broken system. In preparation for the worst case scenario,
|
lost along with the broken system.
|
||||||
you should consider keeping a paper copy of this key locked away in
|
|
||||||
a safe place.
|
It is recommended that you keep your master key safe, but easily accessible, in
|
||||||
|
order for quick disaster recovery. For this reason, the best place to store it
|
||||||
|
is in your password manager, where it is immediately recoverable. As a backup to
|
||||||
|
this, you should also save the key to a USB drive and store that in a secure
|
||||||
|
place. This way, it is detached from any system, but is still easy to recover
|
||||||
|
from, in case of emergency. Finally, in preparation for the worst case scenario,
|
||||||
|
you should also consider keeping a paper copy of your master key locked away in
|
||||||
|
a safe place. The ``paperkey`` subcommand can be used to create a QR encoded
|
||||||
|
version of your master key. The following command sends the output of the
|
||||||
|
``paperkey`` command to a text file, for easy printing.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
proxmox-backup-client key paperkey --output-format text > qrkey.txt
|
||||||
|
|
||||||
|
|
||||||
Restoring Data
|
Restoring Data
|
||||||
@ -377,11 +394,11 @@ periodic recovery tests to ensure that you can access the data in
|
|||||||
case of problems.
|
case of problems.
|
||||||
|
|
||||||
First, you need to find the snapshot which you want to restore. The snapshot
|
First, you need to find the snapshot which you want to restore. The snapshot
|
||||||
command provides a list of all the snapshots on the server:
|
list command provides a list of all the snapshots on the server:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client snapshots
|
# proxmox-backup-client snapshot list
|
||||||
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
|
┌────────────────────────────────┬─────────────┬────────────────────────────────────┐
|
||||||
│ snapshot │ size │ files │
|
│ snapshot │ size │ files │
|
||||||
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
|
╞════════════════════════════════╪═════════════╪════════════════════════════════════╡
|
||||||
@ -533,6 +550,29 @@ To remove the ticket, issue a logout:
|
|||||||
# proxmox-backup-client logout
|
# proxmox-backup-client logout
|
||||||
|
|
||||||
|
|
||||||
|
.. _changing-backup-owner:
|
||||||
|
|
||||||
|
Changing the Owner of a Backup Group
|
||||||
|
------------------------------------
|
||||||
|
|
||||||
|
By default, the owner of a backup group is the user which was used to originally
|
||||||
|
create that backup group (or in the case of sync jobs, ``root@pam``). This
|
||||||
|
means that if a user ``mike@pbs`` created a backup, another user ``john@pbs``
|
||||||
|
can not be used to create backups in that same backup group. In case you want
|
||||||
|
to change the owner of a backup, you can do so with the below command, using a
|
||||||
|
user that has ``Datastore.Modify`` privileges on the datastore.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client change-owner vm/103 john@pbs
|
||||||
|
|
||||||
|
This can also be done from within the web interface, by navigating to the
|
||||||
|
`Content` section of the datastore that contains the backup group and
|
||||||
|
selecting the user icon under the `Actions` column. Common cases for this could
|
||||||
|
be to change the owner of a sync job from ``root@pam``, or to repurpose a
|
||||||
|
backup group.
|
||||||
|
|
||||||
|
|
||||||
.. _backup-pruning:
|
.. _backup-pruning:
|
||||||
|
|
||||||
Pruning and Removing Backups
|
Pruning and Removing Backups
|
||||||
@ -543,7 +583,7 @@ command:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client forget <snapshot>
|
# proxmox-backup-client snapshot forget <snapshot>
|
||||||
|
|
||||||
|
|
||||||
.. caution:: This command removes all archives in this backup
|
.. caution:: This command removes all archives in this backup
|
||||||
|
@ -171,6 +171,8 @@ html_theme_options = {
|
|||||||
'extra_nav_links': {
|
'extra_nav_links': {
|
||||||
'Proxmox Homepage': 'https://proxmox.com',
|
'Proxmox Homepage': 'https://proxmox.com',
|
||||||
'PDF': 'proxmox-backup.pdf',
|
'PDF': 'proxmox-backup.pdf',
|
||||||
|
'Prune Simulator' : 'prune-simulator/index.html',
|
||||||
|
'LTO Barcode Generator' : 'lto-barcode/index.html',
|
||||||
},
|
},
|
||||||
|
|
||||||
'sidebar_width': '320px',
|
'sidebar_width': '320px',
|
||||||
@ -228,6 +230,10 @@ html_favicon = 'images/favicon.ico'
|
|||||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||||
html_static_path = ['_static']
|
html_static_path = ['_static']
|
||||||
|
|
||||||
|
html_js_files = [
|
||||||
|
'custom.js',
|
||||||
|
]
|
||||||
|
|
||||||
# Add any extra paths that contain custom files (such as robots.txt or
|
# Add any extra paths that contain custom files (such as robots.txt or
|
||||||
# .htaccess) here, relative to this directory. These files are copied
|
# .htaccess) here, relative to this directory. These files are copied
|
||||||
# directly to the root of the documentation.
|
# directly to the root of the documentation.
|
||||||
|
7
docs/custom.js
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
window.addEventListener('DOMContentLoaded', (event) => {
|
||||||
|
let activeSection = document.querySelector("a.current");
|
||||||
|
if (activeSection) {
|
||||||
|
// https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView
|
||||||
|
activeSection.scrollIntoView({ block: 'center' });
|
||||||
|
}
|
||||||
|
});
|
11
docs/faq.rst
@ -27,7 +27,7 @@ How long will my Proxmox Backup Server version be supported?
|
|||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+--------------------+---------------+------------+--------------------+
|
||||||
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
||||||
+=======================+====================+===============+============+====================+
|
+=======================+====================+===============+============+====================+
|
||||||
|Proxmox Backup 1.x | Debian 10 (Buster) | tba | tba | tba |
|
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | tba | tba |
|
||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+--------------------+---------------+------------+--------------------+
|
||||||
|
|
||||||
|
|
||||||
@ -53,9 +53,12 @@ checksums. This manifest file is used to verify the integrity of each backup.
|
|||||||
When backing up to remote servers, do I have to trust the remote server?
|
When backing up to remote servers, do I have to trust the remote server?
|
||||||
------------------------------------------------------------------------
|
------------------------------------------------------------------------
|
||||||
|
|
||||||
Proxmox Backup Server supports client-side encryption, meaning your data is
|
Proxmox Backup Server transfers data via `Transport Layer Security (TLS)
|
||||||
encrypted before it reaches the server. Thus, in the event that an attacker
|
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_ and additionally
|
||||||
gains access to the server, they will not be able to read the data.
|
supports client-side encryption. This means that data is transferred securely
|
||||||
|
and can be encrypted before it reaches the server. Thus, in the event that an
|
||||||
|
attacker gains access to the server or any point of the network, they will not
|
||||||
|
be able to read the data.
|
||||||
|
|
||||||
.. note:: Encryption is not enabled by default. To set up encryption, see the
|
.. note:: Encryption is not enabled by default. To set up encryption, see the
|
||||||
`Encryption
|
`Encryption
|
||||||
|
35
docs/gui.rst
@ -4,7 +4,7 @@ Graphical User Interface
|
|||||||
Proxmox Backup Server offers an integrated, web-based interface to manage the
|
Proxmox Backup Server offers an integrated, web-based interface to manage the
|
||||||
server. This means that you can carry out all administration tasks through your
|
server. This means that you can carry out all administration tasks through your
|
||||||
web browser, and that you don't have to worry about installing extra management
|
web browser, and that you don't have to worry about installing extra management
|
||||||
tools. The web interface also provides a built in console, so if you prefer the
|
tools. The web interface also provides a built-in console, so if you prefer the
|
||||||
command line or need some extra control, you have this option.
|
command line or need some extra control, you have this option.
|
||||||
|
|
||||||
The web interface can be accessed via https://youripaddress:8007. The default
|
The web interface can be accessed via https://youripaddress:8007. The default
|
||||||
@ -28,7 +28,6 @@ Login
|
|||||||
-----
|
-----
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-login-window.png
|
.. image:: images/screenshots/pbs-gui-login-window.png
|
||||||
:width: 250
|
|
||||||
:align: right
|
:align: right
|
||||||
:alt: PBS login window
|
:alt: PBS login window
|
||||||
|
|
||||||
@ -44,14 +43,13 @@ GUI Overview
|
|||||||
------------
|
------------
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-dashboard.png
|
.. image:: images/screenshots/pbs-gui-dashboard.png
|
||||||
:width: 250
|
|
||||||
:align: right
|
:align: right
|
||||||
:alt: PBS GUI Dashboard
|
:alt: PBS GUI Dashboard
|
||||||
|
|
||||||
The Proxmox Backup Server web interface consists of 3 main sections:
|
The Proxmox Backup Server web interface consists of 3 main sections:
|
||||||
|
|
||||||
* **Header**: At the top. This shows version information, and contains buttons to view
|
* **Header**: At the top. This shows version information, and contains buttons to view
|
||||||
documentation, monitor running tasks, and logout.
|
documentation, monitor running tasks, set the language and logout.
|
||||||
* **Sidebar**: On the left. This contains the configuration options for
|
* **Sidebar**: On the left. This contains the configuration options for
|
||||||
the server.
|
the server.
|
||||||
* **Configuration Panel**: In the center. This contains the control interface for the
|
* **Configuration Panel**: In the center. This contains the control interface for the
|
||||||
@ -79,18 +77,17 @@ Configuration
|
|||||||
The Configuration section contains some system configuration options, such as
|
The Configuration section contains some system configuration options, such as
|
||||||
time and network configuration. It also contains the following subsections:
|
time and network configuration. It also contains the following subsections:
|
||||||
|
|
||||||
* **User Management**: Add users and manage accounts
|
* **Access Control**: Add and manage users, API tokens, and the permissions
|
||||||
* **Permissions**: Manage permissions for various users
|
associated with these items
|
||||||
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
* **Remotes**: Add, edit and remove remotes (see :term:`Remote`)
|
||||||
* **Sync Jobs**: Manage and run sync jobs to remotes
|
* **Subscription**: Upload a subscription key, view subscription status and
|
||||||
* **Subscription**: Upload a subscription key and view subscription status
|
access a text-based system report.
|
||||||
|
|
||||||
|
|
||||||
Administration
|
Administration
|
||||||
^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
|
.. image:: images/screenshots/pbs-gui-administration-serverstatus.png
|
||||||
:width: 250
|
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Administration: Server Status overview
|
:alt: Administration: Server Status overview
|
||||||
|
|
||||||
@ -105,7 +102,6 @@ tasks and information. These are:
|
|||||||
* **Tasks**: Task history with multiple filter options
|
* **Tasks**: Task history with multiple filter options
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-disks.png
|
.. image:: images/screenshots/pbs-gui-disks.png
|
||||||
:width: 250
|
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Administration: Disks
|
:alt: Administration: Disks
|
||||||
|
|
||||||
@ -120,16 +116,21 @@ The administration menu item also contains a disk management subsection:
|
|||||||
Datastore
|
Datastore
|
||||||
^^^^^^^^^
|
^^^^^^^^^
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-datastore.png
|
.. image:: images/screenshots/pbs-gui-datastore-summary.png
|
||||||
:width: 250
|
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Datastore Configuration
|
:alt: Datastore Configuration
|
||||||
|
|
||||||
The Datastore section provides an interface for creating and managing
|
The Datastore section contains interfaces for creating and managing
|
||||||
datastores. It contains a subsection for each datastore on the system, in
|
datastores. It contains a button to create a new datastore on the server, as
|
||||||
which you can use the top panel to view:
|
well as a subsection for each datastore on the system, in which you can use the
|
||||||
|
top panel to view:
|
||||||
|
|
||||||
|
* **Summary**: Access a range of datastore usage statistics
|
||||||
* **Content**: Information on the datastore's backup groups and their respective
|
* **Content**: Information on the datastore's backup groups and their respective
|
||||||
contents
|
contents
|
||||||
* **Statistics**: Usage statistics for the datastore
|
* **Prune & GC**: Schedule :ref:`pruning <backup-pruning>` and :ref:`garbage
|
||||||
* **Permissions**: View and manage permissions for the datastore
|
collection <garbage-collection>` operations, and run garbage collection
|
||||||
|
manually
|
||||||
|
* **Sync Jobs**: Create, manage and run :ref:`syncjobs` from remote servers
|
||||||
|
* **Verify Jobs**: Create, manage and run :ref:`maintenance_verification` jobs on the
|
||||||
|
datastore
|
||||||
|
Before Width: | Height: | Size: 127 KiB After Width: | Height: | Size: 140 KiB |
BIN
docs/images/screenshots/pbs-gui-apitoken-overview.png
Normal file
After Width: | Height: | Size: 60 KiB |
BIN
docs/images/screenshots/pbs-gui-apitoken-secret-value.png
Normal file
After Width: | Height: | Size: 18 KiB |
After Width: | Height: | Size: 33 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-content.png
Normal file
After Width: | Height: | Size: 90 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-prunegc.png
Normal file
After Width: | Height: | Size: 66 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-summary.png
Normal file
After Width: | Height: | Size: 130 KiB |
BIN
docs/images/screenshots/pbs-gui-datastore-verifyjob-add.png
Normal file
After Width: | Height: | Size: 15 KiB |
BIN
docs/images/screenshots/pbs-gui-tfa-add-recovery-keys.png
Normal file
After Width: | Height: | Size: 36 KiB |
BIN
docs/images/screenshots/pbs-gui-tfa-add-totp.png
Normal file
After Width: | Height: | Size: 31 KiB |
BIN
docs/images/screenshots/pbs-gui-tfa-login.png
Normal file
After Width: | Height: | Size: 16 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 19 KiB |
Before Width: | Height: | Size: 54 KiB After Width: | Height: | Size: 62 KiB |
@ -9,7 +9,7 @@ Debian_ from the provided package repository.
|
|||||||
|
|
||||||
.. include:: package-repositories.rst
|
.. include:: package-repositories.rst
|
||||||
|
|
||||||
Server installation
|
Server Installation
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
The backup server stores the actual backed up data and provides a web based GUI
|
The backup server stores the actual backed up data and provides a web based GUI
|
||||||
@ -37,22 +37,21 @@ Download the ISO from |DOWNLOADS|.
|
|||||||
It includes the following:
|
It includes the following:
|
||||||
|
|
||||||
* The `Proxmox Backup`_ server installer, which partitions the local
|
* The `Proxmox Backup`_ server installer, which partitions the local
|
||||||
disk(s) with ext4, ext3, xfs or ZFS, and installs the operating
|
disk(s) with ext4, xfs or ZFS, and installs the operating system
|
||||||
system
|
|
||||||
|
|
||||||
* Complete operating system (Debian Linux, 64-bit)
|
* Complete operating system (Debian Linux, 64-bit)
|
||||||
|
|
||||||
* Our Linux kernel with ZFS support
|
* Proxmox Linux kernel with ZFS support
|
||||||
|
|
||||||
* Complete tool-set to administer backups and all necessary resources
|
* Complete tool-set to administer backups and all necessary resources
|
||||||
|
|
||||||
* Web based GUI management interface
|
* Web based management interface
|
||||||
|
|
||||||
.. note:: During the installation process, the complete server
|
.. note:: During the installation process, the complete server
|
||||||
is used by default and all existing data is removed.
|
is used by default and all existing data is removed.
|
||||||
|
|
||||||
|
|
||||||
Install `Proxmox Backup`_ server on Debian
|
Install `Proxmox Backup`_ Server on Debian
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox ships as a set of Debian packages which can be installed on top of a
|
Proxmox ships as a set of Debian packages which can be installed on top of a
|
||||||
@ -84,11 +83,11 @@ support, and a set of common and useful packages.
|
|||||||
when LVM_ or ZFS_ is used. The network configuration is completely up to you
|
when LVM_ or ZFS_ is used. The network configuration is completely up to you
|
||||||
as well.
|
as well.
|
||||||
|
|
||||||
.. note:: You can access the web interface of the Proxmox Backup Server with
|
.. Note:: You can access the web interface of the Proxmox Backup Server with
|
||||||
your web browser, using HTTPS on port 8007. For example at
|
your web browser, using HTTPS on port 8007. For example at
|
||||||
``https://<ip-or-dns-name>:8007``
|
``https://<ip-or-dns-name>:8007``
|
||||||
|
|
||||||
Install Proxmox Backup server on `Proxmox VE`_
|
Install Proxmox Backup Server on `Proxmox VE`_
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
After configuring the
|
After configuring the
|
||||||
@ -104,14 +103,14 @@ After configuring the
|
|||||||
server to store backups. Should the hypervisor server fail, you can
|
server to store backups. Should the hypervisor server fail, you can
|
||||||
still access the backups.
|
still access the backups.
|
||||||
|
|
||||||
.. note::
|
.. Note:: You can access the web interface of the Proxmox Backup Server with
|
||||||
You can access the web interface of the Proxmox Backup Server with your web
|
your web browser, using HTTPS on port 8007. For example at
|
||||||
browser, using HTTPS on port 8007. For example at ``https://<ip-or-dns-name>:8007``
|
``https://<ip-or-dns-name>:8007``
|
||||||
|
|
||||||
Client installation
|
Client Installation
|
||||||
-------------------
|
-------------------
|
||||||
|
|
||||||
Install `Proxmox Backup`_ client on Debian
|
Install `Proxmox Backup`_ Client on Debian
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox ships as a set of Debian packages to be installed on
|
Proxmox ships as a set of Debian packages to be installed on
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
Introduction
|
Introduction
|
||||||
============
|
============
|
||||||
|
|
||||||
What is Proxmox Backup Server
|
What is Proxmox Backup Server?
|
||||||
-----------------------------
|
------------------------------
|
||||||
|
|
||||||
Proxmox Backup Server is an enterprise-class, client-server backup software
|
Proxmox Backup Server is an enterprise-class, client-server backup software
|
||||||
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
|
package that backs up :term:`virtual machine`\ s, :term:`container`\ s, and
|
||||||
@ -10,13 +10,16 @@ physical hosts. It is specially optimized for the `Proxmox Virtual Environment`_
|
|||||||
platform and allows you to back up your data securely, even between remote
|
platform and allows you to back up your data securely, even between remote
|
||||||
sites, providing easy management with a web-based user interface.
|
sites, providing easy management with a web-based user interface.
|
||||||
|
|
||||||
Proxmox Backup Server supports deduplication, compression, and authenticated
|
It supports deduplication, compression, and authenticated
|
||||||
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
|
encryption (AE_). Using :term:`Rust` as the implementation language guarantees high
|
||||||
performance, low resource usage, and a safe, high-quality codebase.
|
performance, low resource usage, and a safe, high-quality codebase.
|
||||||
|
|
||||||
It features strong client-side encryption. Thus, it's possible to
|
Proxmox Backup uses state of the art cryptography for both client-server
|
||||||
backup data to targets that are not fully trusted.
|
communication and backup content :ref:`encryption <encryption>`. All
|
||||||
|
client-server communication uses `TLS
|
||||||
|
<https://en.wikipedia.org/wiki/Transport_Layer_Security>`_, and backup data can
|
||||||
|
be encrypted on the client-side before sending, making it safer to back up data
|
||||||
|
to targets that are not fully trusted.
|
||||||
|
|
||||||
Architecture
|
Architecture
|
||||||
------------
|
------------
|
||||||
@ -63,8 +66,9 @@ Main Features
|
|||||||
several gigabytes of data per second.
|
several gigabytes of data per second.
|
||||||
|
|
||||||
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
|
:Encryption: Backups can be encrypted on the client-side, using AES-256 in
|
||||||
Galois/Counter Mode (GCM_) mode. This authenticated encryption (AE_) mode
|
Galois/Counter Mode (GCM_). This authenticated encryption (AE_) mode
|
||||||
provides very high performance on modern hardware.
|
provides very high performance on modern hardware. In addition to client-side
|
||||||
|
encryption, all data is transferred via a secure TLS connection.
|
||||||
|
|
||||||
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
:Web interface: Manage the Proxmox Backup Server with the integrated, web-based
|
||||||
user interface.
|
user interface.
|
||||||
@ -125,8 +129,7 @@ language.
|
|||||||
|
|
||||||
-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
|
-- `The Rust Programming Language <https://doc.rust-lang.org/book/ch00-00-introduction.html>`_
|
||||||
|
|
||||||
.. todo:: further explain the software stack
|
.. _get_help:
|
||||||
|
|
||||||
|
|
||||||
Getting Help
|
Getting Help
|
||||||
------------
|
------------
|
||||||
@ -179,29 +182,28 @@ along with this program. If not, see AGPL3_.
|
|||||||
History
|
History
|
||||||
-------
|
-------
|
||||||
|
|
||||||
Backup is, and always was, as central aspect of IT administration.
|
Backup is, and always has been, a central aspect of IT administration.
|
||||||
The need to recover from data loss is fundamental and increases with
|
The need to recover from data loss is fundamental and only increases with
|
||||||
virtualization.
|
virtualization.
|
||||||
|
|
||||||
Not surprisingly, we shipped a backup tool with Proxmox VE from the
|
For this reason, we've been shipping a backup tool with Proxmox VE, from the
|
||||||
beginning. The tool is called ``vzdump`` and is able to make
|
beginning. This tool is called ``vzdump`` and is able to make
|
||||||
consistent snapshots of running LXC containers and KVM virtual
|
consistent snapshots of running LXC containers and KVM virtual
|
||||||
machines.
|
machines.
|
||||||
|
|
||||||
But ``vzdump`` only allowed for full backups. While this is perfect
|
However, ``vzdump`` only allows for full backups. While this is fine
|
||||||
for small backups, it becomes a burden for users with large VMs. Both
|
for small backups, it becomes a burden for users with large VMs. Both
|
||||||
backup time and space usage was too large for this case, specially
|
backup duration and storage usage are too high for this case, especially
|
||||||
when Users want to keep many backups of the same VMs. We need
|
for users who want to keep many backups of the same VMs. To solve these
|
||||||
deduplication and incremental backups to solve those problems.
|
problems, we needed to offer deduplication and incremental backups.
|
||||||
|
|
||||||
Back in October 2018 development started. We had been looking into
|
Back in October 2018, development started. We investigated
|
||||||
several technologies and frameworks and finally decided to use
|
several technologies and frameworks and finally decided to use
|
||||||
:term:`Rust` as implementation language to provide high speed and
|
:term:`Rust` as the implementation language, in order to provide high speed and
|
||||||
memory efficiency. The 2018-edition of Rust seemed to be promising and
|
memory efficiency. The 2018-edition of Rust seemed promising for our
|
||||||
useful for our requirements.
|
requirements.
|
||||||
|
|
||||||
In July 2020 we released the first beta version of Proxmox Backup
|
In July 2020, we released the first beta version of Proxmox Backup
|
||||||
Server, followed by a first stable version in November 2020. With the
|
Server, followed by the first stable version in November 2020. With support for
|
||||||
support of incremental, fully deduplicated backups, Proxmox Backup
|
incremental, fully deduplicated backups, Proxmox Backup significantly reduces
|
||||||
significantly reduces the network load and saves valuable storage
|
network load and saves valuable storage space.
|
||||||
space.
|
|
||||||
|
351
docs/lto-barcode/code39.js
Normal file
@ -0,0 +1,351 @@
|
|||||||
|
// Code39 barcode generator
|
||||||
|
// see https://en.wikipedia.org/wiki/Code_39
|
||||||
|
|
||||||
|
// IBM LTO Ultrium Cartridge Label Specification
|
||||||
|
// http://www-01.ibm.com/support/docview.wss?uid=ssg1S7000429
|
||||||
|
|
||||||
|
let code39_codes = {
|
||||||
|
"1": ['B', 's', 'b', 'S', 'b', 's', 'b', 's', 'B'],
|
||||||
|
"A": ['B', 's', 'b', 's', 'b', 'S', 'b', 's', 'B'],
|
||||||
|
"K": ['B', 's', 'b', 's', 'b', 's', 'b', 'S', 'B'],
|
||||||
|
"U": ['B', 'S', 'b', 's', 'b', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"2": ['b', 's', 'B', 'S', 'b', 's', 'b', 's', 'B'],
|
||||||
|
"B": ['b', 's', 'B', 's', 'b', 'S', 'b', 's', 'B'],
|
||||||
|
"L": ['b', 's', 'B', 's', 'b', 's', 'b', 'S', 'B'],
|
||||||
|
"V": ['b', 'S', 'B', 's', 'b', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"3": ['B', 's', 'B', 'S', 'b', 's', 'b', 's', 'b'],
|
||||||
|
"C": ['B', 's', 'B', 's', 'b', 'S', 'b', 's', 'b'],
|
||||||
|
"M": ['B', 's', 'B', 's', 'b', 's', 'b', 'S', 'b'],
|
||||||
|
"W": ['B', 'S', 'B', 's', 'b', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"4": ['b', 's', 'b', 'S', 'B', 's', 'b', 's', 'B'],
|
||||||
|
"D": ['b', 's', 'b', 's', 'B', 'S', 'b', 's', 'B'],
|
||||||
|
"N": ['b', 's', 'b', 's', 'B', 's', 'b', 'S', 'B'],
|
||||||
|
"X": ['b', 'S', 'b', 's', 'B', 's', 'b', 's', 'B'],
|
||||||
|
|
||||||
|
"5": ['B', 's', 'b', 'S', 'B', 's', 'b', 's', 'b'],
|
||||||
|
"E": ['B', 's', 'b', 's', 'B', 'S', 'b', 's', 'b'],
|
||||||
|
"O": ['B', 's', 'b', 's', 'B', 's', 'b', 'S', 'b'],
|
||||||
|
"Y": ['B', 'S', 'b', 's', 'B', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"6": ['b', 's', 'B', 'S', 'B', 's', 'b', 's', 'b'],
|
||||||
|
"F": ['b', 's', 'B', 's', 'B', 'S', 'b', 's', 'b'],
|
||||||
|
"P": ['b', 's', 'B', 's', 'B', 's', 'b', 'S', 'b'],
|
||||||
|
"Z": ['b', 'S', 'B', 's', 'B', 's', 'b', 's', 'b'],
|
||||||
|
|
||||||
|
"7": ['b', 's', 'b', 'S', 'b', 's', 'B', 's', 'B'],
|
||||||
|
"G": ['b', 's', 'b', 's', 'b', 'S', 'B', 's', 'B'],
|
||||||
|
"Q": ['b', 's', 'b', 's', 'b', 's', 'B', 'S', 'B'],
|
||||||
|
"-": ['b', 'S', 'b', 's', 'b', 's', 'B', 's', 'B'],
|
||||||
|
|
||||||
|
"8": ['B', 's', 'b', 'S', 'b', 's', 'B', 's', 'b'],
|
||||||
|
"H": ['B', 's', 'b', 's', 'b', 'S', 'B', 's', 'b'],
|
||||||
|
"R": ['B', 's', 'b', 's', 'b', 's', 'B', 'S', 'b'],
|
||||||
|
".": ['B', 'S', 'b', 's', 'b', 's', 'B', 's', 'b'],
|
||||||
|
|
||||||
|
"9": ['b', 's', 'B', 'S', 'b', 's', 'B', 's', 'b'],
|
||||||
|
"I": ['b', 's', 'B', 's', 'b', 'S', 'B', 's', 'b'],
|
||||||
|
"S": ['b', 's', 'B', 's', 'b', 's', 'B', 'S', 'b'],
|
||||||
|
" ": ['b', 'S', 'B', 's', 'b', 's', 'B', 's', 'b'],
|
||||||
|
|
||||||
|
"0": ['b', 's', 'b', 'S', 'B', 's', 'B', 's', 'b'],
|
||||||
|
"J": ['b', 's', 'b', 's', 'B', 'S', 'B', 's', 'b'],
|
||||||
|
"T": ['b', 's', 'b', 's', 'B', 's', 'B', 'S', 'b'],
|
||||||
|
"*": ['b', 'S', 'b', 's', 'B', 's', 'B', 's', 'b']
|
||||||
|
};
|
||||||
|
|
||||||
|
let colors = [
|
||||||
|
'#BB282E',
|
||||||
|
'#FAE54A',
|
||||||
|
'#9AC653',
|
||||||
|
'#01A5E2',
|
||||||
|
'#9EAAB6',
|
||||||
|
'#D97E35',
|
||||||
|
'#E27B99',
|
||||||
|
'#67A945',
|
||||||
|
'#F6B855',
|
||||||
|
'#705A81'
|
||||||
|
];
|
||||||
|
|
||||||
|
let lto_label_width = 70;
|
||||||
|
let lto_label_height = 17;
|
||||||
|
|
||||||
|
function foreach_label(page_layout, callback) {
|
||||||
|
|
||||||
|
let count = 0;
|
||||||
|
let row = 0;
|
||||||
|
let height = page_layout.margin_top;
|
||||||
|
|
||||||
|
while ((height + page_layout.label_height) <= page_layout.page_height) {
|
||||||
|
|
||||||
|
let column = 0;
|
||||||
|
let width = page_layout.margin_left;
|
||||||
|
|
||||||
|
while ((width + page_layout.label_width) <= page_layout.page_width) {
|
||||||
|
|
||||||
|
callback(column, row, count, width, height);
|
||||||
|
count += 1;
|
||||||
|
|
||||||
|
column += 1;
|
||||||
|
width += page_layout.label_width;
|
||||||
|
width += page_layout.column_spacing;
|
||||||
|
}
|
||||||
|
|
||||||
|
row += 1;
|
||||||
|
height += page_layout.label_height;
|
||||||
|
height += page_layout.row_spacing;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function compute_max_labels(page_layout) {
|
||||||
|
|
||||||
|
let max_labels = 0;
|
||||||
|
foreach_label(page_layout, function() { max_labels += 1; });
|
||||||
|
return max_labels;
|
||||||
|
}
|
||||||
|
|
||||||
|
function svg_label(mode, label, label_type, pagex, pagey, label_borders) {
|
||||||
|
let svg = "";
|
||||||
|
|
||||||
|
if (label.length != 6) {
|
||||||
|
throw "wrong label length";
|
||||||
|
}
|
||||||
|
if (label_type.length != 2) {
|
||||||
|
throw "wrong label_type length";
|
||||||
|
}
|
||||||
|
|
||||||
|
let ratio = 2.75;
|
||||||
|
let parts = 3*ratio + 6; // 3*wide + 6*small;
|
||||||
|
let barcode_width = (lto_label_width/12)*10; // 10*code + 2margin
|
||||||
|
let small = barcode_width/(parts*10 + 9);
|
||||||
|
let code_width = small*parts;
|
||||||
|
let wide = small*ratio;
|
||||||
|
let xpos = pagex + code_width;
|
||||||
|
let height = 12;
|
||||||
|
|
||||||
|
if (mode === 'placeholder') {
|
||||||
|
if (label_borders) {
|
||||||
|
svg += `<rect class='unprintable' x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
|
}
|
||||||
|
return svg;
|
||||||
|
}
|
||||||
|
if (label_borders) {
|
||||||
|
svg += `<rect x='${pagex}' y='${pagey}' width='${lto_label_width}' height='${lto_label_height}' fill='none' style='stroke:black;stroke-width:0.1;'/>`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode === "color" || mode == "frame") {
|
||||||
|
let w = lto_label_width/8;
|
||||||
|
let h = lto_label_height - height;
|
||||||
|
for (var i = 0; i < 7; i++) {
|
||||||
|
let textx = w/2 + pagex + i*w;
|
||||||
|
let texty = pagey;
|
||||||
|
|
||||||
|
let fill = "none";
|
||||||
|
if (mode === "color" && (i < 6)) {
|
||||||
|
let letter = label.charAt(i);
|
||||||
|
if (letter >= '0' && letter <= '9') {
|
||||||
|
fill = colors[parseInt(letter, 10)];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
svg += `<rect x='${textx}' y='${texty}' width='${w}' height='${h}' style='stroke:black;stroke-width:0.2;fill:${fill};'/>`;
|
||||||
|
|
||||||
|
if (i == 6) {
|
||||||
|
textx += 3;
|
||||||
|
texty += 3.7;
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:3px;font-family:sans-serif;'>${label_type}</text>`;
|
||||||
|
} else {
|
||||||
|
let letter = label.charAt(i);
|
||||||
|
textx += 3.5;
|
||||||
|
texty += 4;
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let raw_label = `*${label}${label_type}*`;
|
||||||
|
|
||||||
|
for (var i = 0; i < raw_label.length; i++) {
|
||||||
|
let letter = raw_label.charAt(i);
|
||||||
|
|
||||||
|
let code = code39_codes[letter];
|
||||||
|
if (code === undefined) {
|
||||||
|
throw `unable to encode letter '${letter}' with code39`;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode === "simple") {
|
||||||
|
let textx = xpos + code_width/2;
|
||||||
|
let texty = pagey + 4;
|
||||||
|
|
||||||
|
if (i > 0 && (i+1) < raw_label.length) {
|
||||||
|
svg += `<text x='${textx}' y='${texty}' style='font-weight:bold;font-size:4px;font-family:sans-serif;'>${letter}</text>`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let c of code) {
|
||||||
|
|
||||||
|
if (c === 's') {
|
||||||
|
xpos += small;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (c === 'S') {
|
||||||
|
xpos += wide;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let w = c === 'B' ? wide : small;
|
||||||
|
let ypos = pagey + lto_label_height - height;
|
||||||
|
|
||||||
|
svg += `<rect x='${xpos}' y='${ypos}' width='${w}' height='${height}' style='fill:black'/>`;
|
||||||
|
xpos = xpos + w;
|
||||||
|
}
|
||||||
|
xpos += small;
|
||||||
|
}
|
||||||
|
|
||||||
|
return svg;
|
||||||
|
}
|
||||||
|
|
||||||
|
function html_page_header() {
|
||||||
|
let html = "<html5>";
|
||||||
|
|
||||||
|
html += "<style>";
|
||||||
|
|
||||||
|
/* no page margins */
|
||||||
|
html += "@page{margin-left: 0px;margin-right: 0px;margin-top: 0px;margin-bottom: 0px;}";
|
||||||
|
/* to hide things on printed page */
|
||||||
|
html += "@media print { .unprintable { visibility: hidden; } }";
|
||||||
|
|
||||||
|
html += "</style>";
|
||||||
|
|
||||||
|
//html += "<body onload='window.print()'>";
|
||||||
|
html += "<body style='background-color: white;'>";
|
||||||
|
|
||||||
|
return html;
|
||||||
|
}
|
||||||
|
|
||||||
|
function svg_page_header(page_width, page_height) {
|
||||||
|
let svg = "<svg version='1.1' xmlns='http://www.w3.org/2000/svg'";
|
||||||
|
svg += ` width='${page_width}mm' height='${page_height}mm' viewBox='0 0 ${page_width} ${page_height}'>`;
|
||||||
|
|
||||||
|
return svg;
|
||||||
|
}
|
||||||
|
|
||||||
|
function printBarcodePage() {
|
||||||
|
let frame = document.getElementById("print_frame");
|
||||||
|
|
||||||
|
let window = frame.contentWindow;
|
||||||
|
window.print();
|
||||||
|
}
|
||||||
|
|
||||||
|
function generate_barcode_page(target_id, page_layout, label_list, calibration) {
|
||||||
|
|
||||||
|
let svg = svg_page_header(page_layout.page_width, page_layout.page_height);
|
||||||
|
|
||||||
|
let c = calibration;
|
||||||
|
|
||||||
|
console.log(calibration);
|
||||||
|
|
||||||
|
svg += "<g id='barcode_page'";
|
||||||
|
if (c !== undefined) {
|
||||||
|
svg += ` transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'`;
|
||||||
|
}
|
||||||
|
svg += '>';
|
||||||
|
|
||||||
|
foreach_label(page_layout, function(column, row, count, xpos, ypos) {
|
||||||
|
|
||||||
|
if (count >= label_list.length) { return; }
|
||||||
|
|
||||||
|
let item = label_list[count];
|
||||||
|
|
||||||
|
svg += svg_label(item.mode, item.label, item.tape_type, xpos, ypos, page_layout.label_borders);
|
||||||
|
});
|
||||||
|
|
||||||
|
svg += "</g>";
|
||||||
|
svg += "</svg>";
|
||||||
|
|
||||||
|
let html = html_page_header();
|
||||||
|
html += svg;
|
||||||
|
html += "</body>";
|
||||||
|
html += "</html>";
|
||||||
|
|
||||||
|
let frame = document.getElementById(target_id);
|
||||||
|
|
||||||
|
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
||||||
|
|
||||||
|
let fwindow = frame.contentWindow;
|
||||||
|
|
||||||
|
fwindow.document.open();
|
||||||
|
fwindow.document.write(html);
|
||||||
|
fwindow.document.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
function setupPrintFrame(frame, page_width, page_height) {
|
||||||
|
let dpi = 98;
|
||||||
|
|
||||||
|
let dpr = window.devicePixelRatio;
|
||||||
|
if (dpr !== undefined) {
|
||||||
|
dpi = dpi*dpr;
|
||||||
|
}
|
||||||
|
|
||||||
|
let ppmm = dpi/25.4;
|
||||||
|
|
||||||
|
frame.width = page_width*ppmm;
|
||||||
|
frame.height = page_height*ppmm;
|
||||||
|
}
|
||||||
|
|
||||||
|
function generate_calibration_page(target_id, page_layout, calibration) {
|
||||||
|
|
||||||
|
let frame = document.getElementById(target_id);
|
||||||
|
|
||||||
|
setupPrintFrame(frame, page_layout.page_width, page_layout.page_height);
|
||||||
|
|
||||||
|
let svg = svg_page_header( page_layout.page_width, page_layout.page_height);
|
||||||
|
|
||||||
|
svg += "<defs>";
|
||||||
|
svg += "<marker id='endarrow' markerWidth='10' markerHeight='7' ";
|
||||||
|
svg += "refX='10' refY='3.5' orient='auto'><polygon points='0 0, 10 3.5, 0 7' />";
|
||||||
|
svg += "</marker>";
|
||||||
|
|
||||||
|
svg += "<marker id='startarrow' markerWidth='10' markerHeight='7' ";
|
||||||
|
svg += "refX='0' refY='3.5' orient='auto'><polygon points='10 0, 10 7, 0 3.5' />";
|
||||||
|
svg += "</marker>";
|
||||||
|
svg += "</defs>";
|
||||||
|
|
||||||
|
svg += "<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(0,0,0)'/>";
|
||||||
|
|
||||||
|
let text_style = "style='font-weight:bold;font-size:4;font-family:sans-serif;'";
|
||||||
|
|
||||||
|
svg += `<text x='10' y='99' ${text_style}>Sx = 50mm</text>`;
|
||||||
|
svg += "<line x1='0' y1='100' x2='50' y2='100' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";
|
||||||
|
|
||||||
|
svg += `<text x='60' y='99' ${text_style}>Dx = 100mm</text>`;
|
||||||
|
svg += "<line x1='50' y1='100' x2='150' y2='100' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";
|
||||||
|
|
||||||
|
svg += `<text x='142' y='10' ${text_style} writing-mode='tb'>Sy = 50mm</text>`;
|
||||||
|
svg += "<line x1='140' y1='0' x2='140' y2='50' stroke='#000' marker-end='url(#endarrow)' stroke-width='.25'/>";
|
||||||
|
|
||||||
|
svg += `<text x='142' y='60' ${text_style} writing-mode='tb'>Dy = 100mm</text>`;
|
||||||
|
svg += "<line x1='140' y1='50' x2='140' y2='150' stroke='#000' marker-start='url(#startarrow)' marker-end='url(#endarrow)' stroke-width='.25'/>";
|
||||||
|
|
||||||
|
let c = calibration;
|
||||||
|
if (c !== undefined) {
|
||||||
|
svg += `<rect x='50' y='50' width='100' height='100' style='fill:none;stroke-width:0.05;stroke:rgb(255,0,0)' `;
|
||||||
|
svg += `transform='scale(${c.scalex}, ${c.scaley}),translate(${c.offsetx}, ${c.offsety})'/>`;
|
||||||
|
}
|
||||||
|
|
||||||
|
svg += "</svg>";
|
||||||
|
|
||||||
|
let html = html_page_header();
|
||||||
|
html += svg;
|
||||||
|
html += "</body>";
|
||||||
|
html += "</html>";
|
||||||
|
|
||||||
|
let fwindow = frame.contentWindow;
|
||||||
|
|
||||||
|
fwindow.document.open();
|
||||||
|
fwindow.document.write(html);
|
||||||
|
fwindow.document.close();
|
||||||
|
}
|
51
docs/lto-barcode/index.html
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
|
||||||
|
<title>Proxmox LTO Barcode Label Generator</title>
|
||||||
|
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
|
||||||
|
<style>
|
||||||
|
/* fix action column icons */
|
||||||
|
.x-action-col-icon {
|
||||||
|
font-size: 13px;
|
||||||
|
height: 13px;
|
||||||
|
}
|
||||||
|
.x-grid-cell-inner-action-col {
|
||||||
|
padding: 6px 10px 5px;
|
||||||
|
}
|
||||||
|
.x-action-col-icon:before {
|
||||||
|
color: #555;
|
||||||
|
}
|
||||||
|
.x-action-col-icon {
|
||||||
|
color: #21BF4B;
|
||||||
|
}
|
||||||
|
.x-action-col-icon {
|
||||||
|
margin: 0 1px;
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
.x-action-col-icon:before, .x-action-col-icon:after {
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
.x-action-col-icon:hover:before, .x-action-col-icon:hover:after {
|
||||||
|
text-shadow: 1px 1px 1px #AAA;
|
||||||
|
font-weight: 800;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
|
||||||
|
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||||
|
|
||||||
|
<script type="text/javascript" src="code39.js"></script>
|
||||||
|
<script type="text/javascript" src="prefix-field.js"></script>
|
||||||
|
<script type="text/javascript" src="label-style.js"></script>
|
||||||
|
<script type="text/javascript" src="tape-type.js"></script>
|
||||||
|
<script type="text/javascript" src="paper-size.js"></script>
|
||||||
|
<script type="text/javascript" src="page-layout.js"></script>
|
||||||
|
<script type="text/javascript" src="page-calibration.js"></script>
|
||||||
|
<script type="text/javascript" src="label-list.js"></script>
|
||||||
|
<script type="text/javascript" src="label-setup.js"></script>
|
||||||
|
<script type="text/javascript" src="lto-barcode.js"></script>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
</body>
|
||||||
|
</html>
|
140
docs/lto-barcode/label-list.js
Normal file
@ -0,0 +1,140 @@
|
|||||||
|
Ext.define('LabelList', {
|
||||||
|
extend: 'Ext.grid.Panel',
|
||||||
|
alias: 'widget.labelList',
|
||||||
|
|
||||||
|
plugins: {
|
||||||
|
ptype: 'cellediting',
|
||||||
|
clicksToEdit: 1
|
||||||
|
},
|
||||||
|
|
||||||
|
selModel: 'cellmodel',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: [
|
||||||
|
'prefix',
|
||||||
|
'tape_type',
|
||||||
|
{
|
||||||
|
type: 'integer',
|
||||||
|
name: 'start',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
type: 'integer',
|
||||||
|
name: 'end',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
data: [],
|
||||||
|
},
|
||||||
|
|
||||||
|
listeners: {
|
||||||
|
validateedit: function(editor, context) {
|
||||||
|
console.log(context.field);
|
||||||
|
console.log(context.value);
|
||||||
|
context.record.set(context.field, context.value);
|
||||||
|
context.record.commit();
|
||||||
|
return true;
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
columns: [
|
||||||
|
{
|
||||||
|
text: 'Prefix',
|
||||||
|
dataIndex: 'prefix',
|
||||||
|
flex: 1,
|
||||||
|
editor: {
|
||||||
|
xtype: 'prefixfield',
|
||||||
|
allowBlank: false,
|
||||||
|
},
|
||||||
|
renderer: function (value, metaData, record) {
|
||||||
|
console.log(record);
|
||||||
|
if (record.data.mode === 'placeholder') {
|
||||||
|
return "-";
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: 'Type',
|
||||||
|
dataIndex: 'tape_type',
|
||||||
|
flex: 1,
|
||||||
|
editor: {
|
||||||
|
xtype: 'ltoTapeType',
|
||||||
|
allowBlank: false,
|
||||||
|
},
|
||||||
|
renderer: function (value, metaData, record) {
|
||||||
|
console.log(record);
|
||||||
|
if (record.data.mode === 'placeholder') {
|
||||||
|
return "-";
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: 'Mode',
|
||||||
|
dataIndex: 'mode',
|
||||||
|
flex: 1,
|
||||||
|
editor: {
|
||||||
|
xtype: 'ltoLabelStyle',
|
||||||
|
allowBlank: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: 'Start',
|
||||||
|
dataIndex: 'start',
|
||||||
|
flex: 1,
|
||||||
|
editor: {
|
||||||
|
xtype: 'numberfield',
|
||||||
|
allowBlank: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
text: 'End',
|
||||||
|
dataIndex: 'end',
|
||||||
|
flex: 1,
|
||||||
|
editor: {
|
||||||
|
xtype: 'numberfield',
|
||||||
|
},
|
||||||
|
renderer: function(value) {
|
||||||
|
if (value === null || value === '' || value === undefined) {
|
||||||
|
return "Fill";
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'actioncolumn',
|
||||||
|
width: 75,
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
tooltip: 'Move Up',
|
||||||
|
iconCls: 'fa fa-arrow-up',
|
||||||
|
handler: function(grid, rowIndex) {
|
||||||
|
if (rowIndex < 1) { return; }
|
||||||
|
let store = grid.getStore();
|
||||||
|
let record = store.getAt(rowIndex);
|
||||||
|
store.removeAt(rowIndex);
|
||||||
|
store.insert(rowIndex - 1, record);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
tooltip: 'Move Down',
|
||||||
|
iconCls: 'fa fa-arrow-down',
|
||||||
|
handler: function(grid, rowIndex) {
|
||||||
|
let store = grid.getStore();
|
||||||
|
if (rowIndex >= store.getCount()) { return; }
|
||||||
|
let record = store.getAt(rowIndex);
|
||||||
|
store.removeAt(rowIndex);
|
||||||
|
store.insert(rowIndex + 1, record);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
tooltip: 'Delete',
|
||||||
|
iconCls: 'fa fa-scissors',
|
||||||
|
//iconCls: 'fa critical fa-trash-o',
|
||||||
|
handler: function(grid, rowIndex) {
|
||||||
|
grid.getStore().removeAt(rowIndex);
|
||||||
|
},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
107
docs/lto-barcode/label-setup.js
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
Ext.define('LabelSetupPanel', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.labelSetupPanel',
|
||||||
|
|
||||||
|
layout: {
|
||||||
|
type: 'hbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
|
||||||
|
getValues: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
let values = {};
|
||||||
|
|
||||||
|
Ext.Array.each(me.query('[isFormField]'), function(field) {
|
||||||
|
let data = field.getSubmitData();
|
||||||
|
Ext.Object.each(data, function(name, val) {
|
||||||
|
let parsed = parseInt(val, 10);
|
||||||
|
values[name] = isNaN(parsed) ? val : parsed;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
return values;
|
||||||
|
},
|
||||||
|
|
||||||
|
controller: {
|
||||||
|
xclass: 'Ext.app.ViewController',
|
||||||
|
|
||||||
|
init: function() {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
let list = view.down("labelList");
|
||||||
|
let store = list.getStore();
|
||||||
|
store.on('datachanged', function(store) {
|
||||||
|
view.fireEvent("listchanged", store);
|
||||||
|
});
|
||||||
|
store.on('update', function(store) {
|
||||||
|
view.fireEvent("listchanged", store);
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
onAdd: function() {
|
||||||
|
let list = this.lookupReference('label_list');
|
||||||
|
let view = this.getView();
|
||||||
|
let params = view.getValues();
|
||||||
|
list.getStore().add(params);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
border: false,
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'prefixfield',
|
||||||
|
name: 'prefix',
|
||||||
|
value: 'TEST',
|
||||||
|
fieldLabel: 'Prefix',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'ltoTapeType',
|
||||||
|
name: 'tape_type',
|
||||||
|
fieldLabel: 'Type',
|
||||||
|
value: 'L8',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'ltoLabelStyle',
|
||||||
|
name: 'mode',
|
||||||
|
fieldLabel: 'Mode',
|
||||||
|
value: 'color',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'start',
|
||||||
|
fieldLabel: 'Start',
|
||||||
|
minValue: 0,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'end',
|
||||||
|
fieldLabel: 'End',
|
||||||
|
minValue: 0,
|
||||||
|
emptyText: 'Fill',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'button',
|
||||||
|
text: 'Add',
|
||||||
|
handler: 'onAdd',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
margin: "0 0 0 10",
|
||||||
|
xtype: 'labelList',
|
||||||
|
reference: 'label_list',
|
||||||
|
flex: 1,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
20
docs/lto-barcode/label-style.js
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
Ext.define('LtoLabelStyle', {
|
||||||
|
extend: 'Ext.form.field.ComboBox',
|
||||||
|
alias: 'widget.ltoLabelStyle',
|
||||||
|
|
||||||
|
editable: false,
|
||||||
|
|
||||||
|
displayField: 'text',
|
||||||
|
valueField: 'value',
|
||||||
|
queryMode: 'local',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: ['value', 'text'],
|
||||||
|
data: [
|
||||||
|
{ value: 'simple', text: "Simple" },
|
||||||
|
{ value: 'color', text: 'Color (frames with color)' },
|
||||||
|
{ value: 'frame', text: 'Frame (no color)' },
|
||||||
|
{ value: 'placeholder', text: 'Placeholder (empty)' },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
});
|
214
docs/lto-barcode/lto-barcode.js
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
||||||
|
if (Ext.isFirefox) {
|
||||||
|
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
||||||
|
}
|
||||||
|
|
||||||
|
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||||
|
|
||||||
|
let max_labels = compute_max_labels(page_layout);
|
||||||
|
|
||||||
|
let count_fixed = 0;
|
||||||
|
let count_fill = 0;
|
||||||
|
|
||||||
|
for (i = 0; i < label_list.length; i++) {
|
||||||
|
let item = label_list[i];
|
||||||
|
if (item.end === null || item.end === '' || item.end === undefined) {
|
||||||
|
count_fill += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (item.end <= item.start) {
|
||||||
|
count_fixed += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
count_fixed += (item.end - item.start) + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
let rest = max_labels - count_fixed;
|
||||||
|
let fill_size = 1;
|
||||||
|
if (rest >= count_fill) {
|
||||||
|
fill_size = Math.floor(rest/count_fill);
|
||||||
|
}
|
||||||
|
|
||||||
|
let list = [];
|
||||||
|
|
||||||
|
let count_fill_2 = 0;
|
||||||
|
|
||||||
|
for (i = 0; i < label_list.length; i++) {
|
||||||
|
let item = label_list[i];
|
||||||
|
let count;
|
||||||
|
if (item.end === null || item.end === '' || item.end === undefined) {
|
||||||
|
count_fill_2 += 1;
|
||||||
|
if (count_fill_2 === count_fill) {
|
||||||
|
count = rest;
|
||||||
|
} else {
|
||||||
|
count = fill_size;
|
||||||
|
}
|
||||||
|
rest -= count;
|
||||||
|
} else {
|
||||||
|
if (item.end <= item.start) {
|
||||||
|
count = 1;
|
||||||
|
} else {
|
||||||
|
count = (item.end - item.start) + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (j = 0; j < count; j++) {
|
||||||
|
|
||||||
|
let id = item.start + j;
|
||||||
|
|
||||||
|
if (item.prefix.length == 6) {
|
||||||
|
|
||||||
|
list.push({
|
||||||
|
label: item.prefix,
|
||||||
|
tape_type: item.tape_type,
|
||||||
|
mode: item.mode,
|
||||||
|
id: id,
|
||||||
|
});
|
||||||
|
rest += count - j - 1;
|
||||||
|
break;
|
||||||
|
|
||||||
|
} else {
|
||||||
|
|
||||||
|
let pad_len = 6-item.prefix.length;
|
||||||
|
let label = item.prefix + id.toString().padStart(pad_len, 0);
|
||||||
|
|
||||||
|
if (label.length != 6) {
|
||||||
|
rest += count - j;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
list.push({
|
||||||
|
label: label,
|
||||||
|
tape_type: item.tape_type,
|
||||||
|
mode: item.mode,
|
||||||
|
id: id,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
generate_barcode_page(target_id, page_layout, list, calibration);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ext.define('MainView', {
|
||||||
|
extend: 'Ext.container.Viewport',
|
||||||
|
alias: 'widget.mainview',
|
||||||
|
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
width: 800,
|
||||||
|
|
||||||
|
controller: {
|
||||||
|
xclass: 'Ext.app.ViewController',
|
||||||
|
|
||||||
|
update_barcode_preview: function() {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
let list_view = view.down("labelList");
|
||||||
|
|
||||||
|
let store = list_view.getStore();
|
||||||
|
let label_list = [];
|
||||||
|
store.each((record) => {
|
||||||
|
label_list.push(record.data);
|
||||||
|
});
|
||||||
|
|
||||||
|
let page_layout_view = view.down("pageLayoutPanel");
|
||||||
|
let page_layout = page_layout_view.getValues();
|
||||||
|
|
||||||
|
let calibration_view = view.down("pageCalibration");
|
||||||
|
let page_calibration = calibration_view.getValues();
|
||||||
|
|
||||||
|
draw_labels("print_frame", label_list, page_layout, page_calibration);
|
||||||
|
},
|
||||||
|
|
||||||
|
update_calibration_preview: function() {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
let page_layout_view = view.down("pageLayoutPanel");
|
||||||
|
let page_layout = page_layout_view.getValues();
|
||||||
|
|
||||||
|
let calibration_view = view.down("pageCalibration");
|
||||||
|
let page_calibration = calibration_view.getValues();
|
||||||
|
console.log(page_calibration);
|
||||||
|
generate_calibration_page('print_frame', page_layout, page_calibration);
|
||||||
|
},
|
||||||
|
|
||||||
|
control: {
|
||||||
|
labelSetupPanel: {
|
||||||
|
listchanged: function(store) {
|
||||||
|
this.update_barcode_preview();
|
||||||
|
},
|
||||||
|
activate: function() {
|
||||||
|
this.update_barcode_preview();
|
||||||
|
},
|
||||||
|
},
|
||||||
|
pageLayoutPanel: {
|
||||||
|
pagechanged: function(layout) {
|
||||||
|
this.update_barcode_preview();
|
||||||
|
},
|
||||||
|
activate: function() {
|
||||||
|
this.update_barcode_preview();
|
||||||
|
},
|
||||||
|
},
|
||||||
|
pageCalibration: {
|
||||||
|
calibrationchanged: function() {
|
||||||
|
this.update_calibration_preview();
|
||||||
|
},
|
||||||
|
activate: function() {
|
||||||
|
this.update_calibration_preview();
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'tabpanel',
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'labelSetupPanel',
|
||||||
|
title: 'Proxmox LTO Barcode Label Generator',
|
||||||
|
bodyPadding: 10,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'pageLayoutPanel',
|
||||||
|
title: 'Page Layout',
|
||||||
|
bodyPadding: 10,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'pageCalibration',
|
||||||
|
title: 'Printer Calibration',
|
||||||
|
bodyPadding: 10,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'panel',
|
||||||
|
layout: "center",
|
||||||
|
title: 'Print Preview',
|
||||||
|
bodyStyle: "background-color: grey;",
|
||||||
|
bodyPadding: 10,
|
||||||
|
html: '<center><iframe id="print_frame" frameBorder="0"></iframe></center>',
|
||||||
|
border: false,
|
||||||
|
flex: 1,
|
||||||
|
scrollable: true,
|
||||||
|
tools:[{
|
||||||
|
type: 'print',
|
||||||
|
tooltip: 'Open Print Dialog',
|
||||||
|
handler: function(event, toolEl, panelHeader) {
|
||||||
|
printBarcodePage();
|
||||||
|
}
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.onReady(function() {
|
||||||
|
|
||||||
|
Ext.create('MainView', {
|
||||||
|
renderTo: Ext.getBody(),
|
||||||
|
});
|
||||||
|
});
|
142
docs/lto-barcode/page-calibration.js
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
Ext.define('PageCalibration', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.pageCalibration',
|
||||||
|
|
||||||
|
layout: {
|
||||||
|
type: 'hbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
|
||||||
|
getValues: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
let values = {};
|
||||||
|
|
||||||
|
Ext.Array.each(me.query('[isFormField]'), function(field) {
|
||||||
|
if (field.isValid()) {
|
||||||
|
let data = field.getSubmitData();
|
||||||
|
Ext.Object.each(data, function(name, val) {
|
||||||
|
let parsed = parseFloat(val, 10);
|
||||||
|
values[name] = isNaN(parsed) ? val : parsed;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if (values.d_x === undefined) { return; }
|
||||||
|
if (values.d_y === undefined) { return; }
|
||||||
|
if (values.s_x === undefined) { return; }
|
||||||
|
if (values.s_y === undefined) { return; }
|
||||||
|
|
||||||
|
scalex = 100/values.d_x;
|
||||||
|
scaley = 100/values.d_y;
|
||||||
|
|
||||||
|
let offsetx = ((50*scalex) - values.s_x)/scalex;
|
||||||
|
let offsety = ((50*scaley) - values.s_y)/scaley;
|
||||||
|
|
||||||
|
return {
|
||||||
|
scalex: scalex,
|
||||||
|
scaley: scaley,
|
||||||
|
offsetx: offsetx,
|
||||||
|
offsety: offsety,
|
||||||
|
};
|
||||||
|
},
|
||||||
|
|
||||||
|
controller: {
|
||||||
|
xclass: 'Ext.app.ViewController',
|
||||||
|
|
||||||
|
control: {
|
||||||
|
'field': {
|
||||||
|
change: function() {
|
||||||
|
let view = this.getView();
|
||||||
|
let param = view.getValues();
|
||||||
|
view.fireEvent("calibrationchanged", param);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
border: false,
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'displayfield',
|
||||||
|
value: 'a4',
|
||||||
|
fieldLabel: 'Start Offset Sx (mm)',
|
||||||
|
labelWidth: 150,
|
||||||
|
value: 50,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'displayfield',
|
||||||
|
value: 'a4',
|
||||||
|
fieldLabel: 'Length Dx (mm)',
|
||||||
|
labelWidth: 150,
|
||||||
|
value: 100,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'displayfield',
|
||||||
|
value: 'a4',
|
||||||
|
fieldLabel: 'Start Offset Sy (mm)',
|
||||||
|
labelWidth: 150,
|
||||||
|
value: 50,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'displayfield',
|
||||||
|
value: 'a4',
|
||||||
|
fieldLabel: 'Length Dy (mm)',
|
||||||
|
labelWidth: 150,
|
||||||
|
value: 100,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
border: false,
|
||||||
|
margin: '0 0 0 20',
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
value: 'a4',
|
||||||
|
name: 's_x',
|
||||||
|
fieldLabel: 'Meassured Start Offset Sx (mm)',
|
||||||
|
allowBlank: false,
|
||||||
|
labelWidth: 200,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
value: 'a4',
|
||||||
|
name: 'd_x',
|
||||||
|
fieldLabel: 'Meassured Length Dx (mm)',
|
||||||
|
allowBlank: false,
|
||||||
|
labelWidth: 200,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
value: 'a4',
|
||||||
|
name: 's_y',
|
||||||
|
fieldLabel: 'Meassured Start Offset Sy (mm)',
|
||||||
|
allowBlank: false,
|
||||||
|
labelWidth: 200,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
value: 'a4',
|
||||||
|
name: 'd_y',
|
||||||
|
fieldLabel: 'Meassured Length Dy (mm)',
|
||||||
|
allowBlank: false,
|
||||||
|
labelWidth: 200,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
})
|
167
docs/lto-barcode/page-layout.js
Normal file
@ -0,0 +1,167 @@
|
|||||||
|
Ext.define('PageLayoutPanel', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.pageLayoutPanel',
|
||||||
|
|
||||||
|
layout: {
|
||||||
|
type: 'hbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
|
||||||
|
getValues: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
let values = {};
|
||||||
|
|
||||||
|
Ext.Array.each(me.query('[isFormField]'), function(field) {
|
||||||
|
if (field.isValid()) {
|
||||||
|
let data = field.getSubmitData();
|
||||||
|
Ext.Object.each(data, function(name, val) {
|
||||||
|
values[name] = val;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let paper_size = values.paper_size || 'a4';
|
||||||
|
|
||||||
|
let param = Ext.apply({}, paper_sizes[paper_size]);
|
||||||
|
if (param === undefined) {
|
||||||
|
throw `unknown paper size ${paper_size}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
param.paper_size = paper_size;
|
||||||
|
|
||||||
|
Ext.Object.each(values, function(name, val) {
|
||||||
|
let parsed = parseFloat(val, 10);
|
||||||
|
param[name] = isNaN(parsed) ? val : parsed;
|
||||||
|
});
|
||||||
|
|
||||||
|
return param;
|
||||||
|
},
|
||||||
|
|
||||||
|
controller: {
|
||||||
|
xclass: 'Ext.app.ViewController',
|
||||||
|
|
||||||
|
control: {
|
||||||
|
'paperSize': {
|
||||||
|
change: function(field, paper_size) {
|
||||||
|
let view = this.getView();
|
||||||
|
let defaults = paper_sizes[paper_size];
|
||||||
|
|
||||||
|
let names = [
|
||||||
|
'label_width',
|
||||||
|
'label_height',
|
||||||
|
'margin_left',
|
||||||
|
'margin_top',
|
||||||
|
'column_spacing',
|
||||||
|
'row_spacing',
|
||||||
|
];
|
||||||
|
for (i = 0; i < names.length; i++) {
|
||||||
|
let name = names[i];
|
||||||
|
let f = view.down(`field[name=${name}]`);
|
||||||
|
let v = defaults[name];
|
||||||
|
if (v != undefined) {
|
||||||
|
f.setValue(v);
|
||||||
|
f.setDisabled(defaults.fixed);
|
||||||
|
} else {
|
||||||
|
f.setDisabled(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
'field': {
|
||||||
|
change: function() {
|
||||||
|
let view = this.getView();
|
||||||
|
let param = view.getValues();
|
||||||
|
view.fireEvent("pagechanged", param);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
border: false,
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'paperSize',
|
||||||
|
name: 'paper_size',
|
||||||
|
value: 'a4',
|
||||||
|
fieldLabel: 'Paper Size',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'label_width',
|
||||||
|
fieldLabel: 'Label width',
|
||||||
|
minValue: 70,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 70,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'label_height',
|
||||||
|
fieldLabel: 'Label height',
|
||||||
|
minValue: 17,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 17,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'checkbox',
|
||||||
|
name: 'label_borders',
|
||||||
|
fieldLabel: 'Label borders',
|
||||||
|
value: true,
|
||||||
|
inputValue: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
border: false,
|
||||||
|
margin: '0 0 0 10',
|
||||||
|
layout: {
|
||||||
|
type: 'vbox',
|
||||||
|
align: 'stretch',
|
||||||
|
pack: 'start',
|
||||||
|
},
|
||||||
|
items: [
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'margin_left',
|
||||||
|
fieldLabel: 'Left margin',
|
||||||
|
minValue: 0,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'margin_top',
|
||||||
|
fieldLabel: 'Top margin',
|
||||||
|
minValue: 0,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 4,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'column_spacing',
|
||||||
|
fieldLabel: 'Column spacing',
|
||||||
|
minValue: 0,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xtype: 'numberfield',
|
||||||
|
name: 'row_spacing',
|
||||||
|
fieldLabel: 'Row spacing',
|
||||||
|
minValue: 0,
|
||||||
|
allowBlank: false,
|
||||||
|
value: 0,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
|
||||||
|
});
|
49
docs/lto-barcode/paper-size.js
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
let paper_sizes = {
|
||||||
|
a4: {
|
||||||
|
comment: 'A4 (plain)',
|
||||||
|
page_width: 210,
|
||||||
|
page_height: 297,
|
||||||
|
},
|
||||||
|
letter: {
|
||||||
|
comment: 'Letter (plain)',
|
||||||
|
page_width: 215.9,
|
||||||
|
page_height: 279.4,
|
||||||
|
},
|
||||||
|
avery3420: {
|
||||||
|
fixed: true,
|
||||||
|
comment: 'Avery Zweckform 3420',
|
||||||
|
page_width: 210,
|
||||||
|
page_height: 297,
|
||||||
|
label_width: 70,
|
||||||
|
label_height: 17,
|
||||||
|
margin_left: 0,
|
||||||
|
margin_top: 4,
|
||||||
|
column_spacing: 0,
|
||||||
|
row_spacing: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
function paper_size_combo_data() {
|
||||||
|
let data = [];
|
||||||
|
|
||||||
|
for (let [key, value] of Object.entries(paper_sizes)) {
|
||||||
|
data.push({ value: key, text: value.comment });
|
||||||
|
}
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ext.define('PaperSize', {
|
||||||
|
extend: 'Ext.form.field.ComboBox',
|
||||||
|
alias: 'widget.paperSize',
|
||||||
|
|
||||||
|
editable: false,
|
||||||
|
|
||||||
|
displayField: 'text',
|
||||||
|
valueField: 'value',
|
||||||
|
queryMode: 'local',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: ['value', 'text'],
|
||||||
|
data: paper_size_combo_data(),
|
||||||
|
},
|
||||||
|
});
|
15
docs/lto-barcode/prefix-field.js
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
Ext.define('PrefixField', {
|
||||||
|
extend: 'Ext.form.field.Text',
|
||||||
|
alias: 'widget.prefixfield',
|
||||||
|
|
||||||
|
maxLength: 6,
|
||||||
|
allowBlank: false,
|
||||||
|
|
||||||
|
maskRe: /([A-Za-z]+)$/,
|
||||||
|
|
||||||
|
listeners: {
|
||||||
|
change: function(field) {
|
||||||
|
field.setValue(field.getValue().toUpperCase());
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
23
docs/lto-barcode/tape-type.js
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
Ext.define('LtoTapeType', {
|
||||||
|
extend: 'Ext.form.field.ComboBox',
|
||||||
|
alias: 'widget.ltoTapeType',
|
||||||
|
|
||||||
|
editable: false,
|
||||||
|
|
||||||
|
displayField: 'text',
|
||||||
|
valueField: 'value',
|
||||||
|
queryMode: 'local',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: ['value', 'text'],
|
||||||
|
data: [
|
||||||
|
{ value: 'L8', text: "LTO-8" },
|
||||||
|
{ value: 'L7', text: "LTO-7" },
|
||||||
|
{ value: 'L6', text: "LTO-6" },
|
||||||
|
{ value: 'L5', text: "LTO-5" },
|
||||||
|
{ value: 'L4', text: "LTO-4" },
|
||||||
|
{ value: 'L3', text: "LTO-3" },
|
||||||
|
{ value: 'CU', text: "Cleaning Unit" },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
});
|
@ -1,13 +1,184 @@
|
|||||||
Maintenance Tasks
|
Maintenance Tasks
|
||||||
=================
|
=================
|
||||||
|
|
||||||
|
.. _maintenance_pruning:
|
||||||
|
|
||||||
|
Pruning
|
||||||
|
-------
|
||||||
|
|
||||||
|
Prune lets you specify which backup snapshots you want to keep. The
|
||||||
|
following retention options are available:
|
||||||
|
|
||||||
|
``keep-last <N>``
|
||||||
|
Keep the last ``<N>`` backup snapshots.
|
||||||
|
|
||||||
|
``keep-hourly <N>``
|
||||||
|
Keep backups for the last ``<N>`` hours. If there is more than one
|
||||||
|
backup for a single hour, only the latest is kept.
|
||||||
|
|
||||||
|
``keep-daily <N>``
|
||||||
|
Keep backups for the last ``<N>`` days. If there is more than one
|
||||||
|
backup for a single day, only the latest is kept.
|
||||||
|
|
||||||
|
``keep-weekly <N>``
|
||||||
|
Keep backups for the last ``<N>`` weeks. If there is more than one
|
||||||
|
backup for a single week, only the latest is kept.
|
||||||
|
|
||||||
|
.. note:: Weeks start on Monday and end on Sunday. The software
|
||||||
|
uses the `ISO week date`_ system and handles weeks at
|
||||||
|
the end of the year correctly.
|
||||||
|
|
||||||
|
``keep-monthly <N>``
|
||||||
|
Keep backups for the last ``<N>`` months. If there is more than one
|
||||||
|
backup for a single month, only the latest is kept.
|
||||||
|
|
||||||
|
``keep-yearly <N>``
|
||||||
|
Keep backups for the last ``<N>`` years. If there is more than one
|
||||||
|
backup for a single year, only the latest is kept.
|
||||||
|
|
||||||
|
The retention options are processed in the order given above. Each option
|
||||||
|
only covers backups within its time period. The next option does not take care
|
||||||
|
of already covered backups. It will only consider older backups.
|
||||||
|
|
||||||
|
Unfinished and incomplete backups will be removed by the prune command unless
|
||||||
|
they are newer than the last successful backup. In this case, the last failed
|
||||||
|
backup is retained.
|
||||||
|
|
||||||
|
Prune Simulator
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
You can use the built-in `prune simulator <prune-simulator/index.html>`_
|
||||||
|
to explore the effect of different retetion options with various backup
|
||||||
|
schedules.
|
||||||
|
|
||||||
|
Manual Pruning
|
||||||
|
^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-content-prune-group.png
|
||||||
|
:target: _images/pbs-gui-datastore-content-prune-group.png
|
||||||
|
:align: right
|
||||||
|
:alt: Prune and garbage collection options
|
||||||
|
|
||||||
|
To access pruning functionality for a specific backup group, you can use the
|
||||||
|
prune command line option discussed in :ref:`backup-pruning`, or navigate to
|
||||||
|
the **Content** tab of the datastore and click the scissors icon in the
|
||||||
|
**Actions** column of the relevant backup group.
|
||||||
|
|
||||||
|
Prune Schedules
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
To prune on a datastore level, scheduling options can be found under the
|
||||||
|
**Prune & GC** tab of the datastore. Here you can set retention settings and
|
||||||
|
edit the interval at which pruning takes place.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-prunegc.png
|
||||||
|
:target: _images/pbs-gui-datastore-prunegc.png
|
||||||
|
:align: right
|
||||||
|
:alt: Prune and garbage collection options
|
||||||
|
|
||||||
|
|
||||||
|
Retention Settings Example
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The backup frequency and retention of old backups may depend on how often data
|
||||||
|
changes, and how important an older state may be, in a specific work load.
|
||||||
|
When backups act as a company's document archive, there may also be legal
|
||||||
|
requirements for how long backup snapshots must be kept.
|
||||||
|
|
||||||
|
For this example, we assume that you are doing daily backups, have a retention
|
||||||
|
period of 10 years, and the period between backups stored gradually grows.
|
||||||
|
|
||||||
|
- **keep-last:** ``3`` - even if only daily backups, an admin may want to create
|
||||||
|
an extra one just before or after a big upgrade. Setting keep-last ensures
|
||||||
|
this.
|
||||||
|
|
||||||
|
- **keep-hourly:** not set - for daily backups this is not relevant. You cover
|
||||||
|
extra manual backups already, with keep-last.
|
||||||
|
|
||||||
|
- **keep-daily:** ``13`` - together with keep-last, which covers at least one
|
||||||
|
day, this ensures that you have at least two weeks of backups.
|
||||||
|
|
||||||
|
- **keep-weekly:** ``8`` - ensures that you have at least two full months of
|
||||||
|
weekly backups.
|
||||||
|
|
||||||
|
- **keep-monthly:** ``11`` - together with the previous keep settings, this
|
||||||
|
ensures that you have at least a year of monthly backups.
|
||||||
|
|
||||||
|
- **keep-yearly:** ``9`` - this is for the long term archive. As you covered the
|
||||||
|
current year with the previous options, you would set this to nine for the
|
||||||
|
remaining ones, giving you a total of at least 10 years of coverage.
|
||||||
|
|
||||||
|
We recommend that you use a higher retention period than is minimally required
|
||||||
|
by your environment; you can always reduce it if you find it is unnecessarily
|
||||||
|
high, but you cannot recreate backup snapshots from the past.
|
||||||
|
|
||||||
|
|
||||||
|
.. _maintenance_gc:
|
||||||
|
|
||||||
Garbage Collection
|
Garbage Collection
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
You can monitor and run :ref:`garbage collection <garbage-collection>` on the
|
||||||
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
Proxmox Backup Server using the ``garbage-collection`` subcommand of
|
||||||
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually start garbage
|
``proxmox-backup-manager``. You can use the ``start`` subcommand to manually
|
||||||
collection on an entire datastore and the ``status`` subcommand to see
|
start garbage collection on an entire datastore and the ``status`` subcommand to
|
||||||
attributes relating to the :ref:`garbage collection <garbage-collection>`.
|
see attributes relating to the :ref:`garbage collection <garbage-collection>`.
|
||||||
|
|
||||||
.. todo:: Add section on verification
|
This functionality can also be accessed in the GUI, by navigating to **Prune &
|
||||||
|
GC** from the top panel. From here, you can edit the schedule at which garbage
|
||||||
|
collection runs and manually start the operation.
|
||||||
|
|
||||||
|
|
||||||
|
.. _maintenance_verification:
|
||||||
|
|
||||||
|
Verification
|
||||||
|
------------
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-datastore-verifyjob-add.png
|
||||||
|
:target: _images/pbs-gui-datastore-verifyjob-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Adding a verify job
|
||||||
|
|
||||||
|
Proxmox Backup offers various verification options to ensure that backup data is
|
||||||
|
intact. Verification is generally carried out through the creation of verify
|
||||||
|
jobs. These are scheduled tasks that run verification at a given interval (see
|
||||||
|
:ref:`calendar-events`). With these, you can set whether already verified
|
||||||
|
snapshots are ignored, as well as set a time period, after which verified jobs
|
||||||
|
are checked again. The interface for creating verify jobs can be found under the
|
||||||
|
**Verify Jobs** tab of the datastore.
|
||||||
|
|
||||||
|
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
||||||
|
if a previous verification was successful. This is becuase physical drives
|
||||||
|
are susceptible to damage over time, which can cause an old, working backup
|
||||||
|
to become corrupted in a process known as `bit rot/data degradation
|
||||||
|
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
|
||||||
|
have a regularly recurring (hourly/daily) verification job, which checks new
|
||||||
|
and expired backups, then another weekly/monthly job that will reverify
|
||||||
|
everything. This way, there will be no surprises when it comes to restoring
|
||||||
|
data.
|
||||||
|
|
||||||
|
Aside from using verify jobs, you can also run verification manually on entire
|
||||||
|
datastores, backup groups, or snapshots. To do this, navigate to the **Content**
|
||||||
|
tab of the datastore and either click *Verify All*, or select the *V.* icon from
|
||||||
|
the *Actions* column in the table.
|
||||||
|
|
||||||
|
.. _maintenance_notification:
|
||||||
|
|
||||||
|
Notifications
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Proxmox Backup Server can send you notification emails about automatically
|
||||||
|
scheduled verification, garbage-collection and synchronization tasks results.
|
||||||
|
|
||||||
|
By default, notifications are send to the email address configured for the
|
||||||
|
`root@pam` user. You can set that user for each datastore.
|
||||||
|
|
||||||
|
You can also change the level of notification received per task type, the
|
||||||
|
following options are available:
|
||||||
|
|
||||||
|
* Always: send a notification for any scheduled task, independent of the
|
||||||
|
outcome
|
||||||
|
|
||||||
|
* Errors: send a notification for any scheduled task resulting in an error
|
||||||
|
|
||||||
|
* Never: do not send any notification at all
|
||||||
|
@ -59,13 +59,13 @@ Sync Jobs
|
|||||||
:alt: Add a Sync Job
|
:alt: Add a Sync Job
|
||||||
|
|
||||||
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
Sync jobs are configured to pull the contents of a datastore on a **Remote** to
|
||||||
a local datastore. You can manage sync jobs under **Configuration -> Sync Jobs**
|
a local datastore. You can manage sync jobs in the web interface, from the
|
||||||
in the web interface, or using the ``proxmox-backup-manager sync-job`` command.
|
**Sync Jobs** tab of the datastore which you'd like to set one up for, or using
|
||||||
The configuration information for sync jobs is stored at
|
the ``proxmox-backup-manager sync-job`` command. The configuration information
|
||||||
``/etc/proxmox-backup/sync.cfg``. To create a new sync job, click the add button
|
for sync jobs is stored at ``/etc/proxmox-backup/sync.cfg``. To create a new
|
||||||
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
|
sync job, click the add button in the GUI, or use the ``create`` subcommand.
|
||||||
either start it manually on the GUI or provide it with a schedule (see
|
After creating a sync job, you can either start it manually from the GUI or
|
||||||
:ref:`calendar-events`) to run regularly.
|
provide it with a schedule (see :ref:`calendar-events`) to run regularly.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -79,4 +79,17 @@ either start it manually on the GUI or provide it with a schedule (see
|
|||||||
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
└────────────┴───────┴────────┴──────────────┴───────────┴─────────┘
|
||||||
# proxmox-backup-manager sync-job remove pbs2-local
|
# proxmox-backup-manager sync-job remove pbs2-local
|
||||||
|
|
||||||
|
For setting up sync jobs, the configuring user needs the following permissions:
|
||||||
|
|
||||||
|
#. ``Remote.Read`` on the ``/remote/{remote}/{remote-store}`` path
|
||||||
|
#. at least ``Datastore.Backup`` on the local target datastore (``/datastore/{store}``)
|
||||||
|
|
||||||
|
If the ``remove-vanished`` option is set, ``Datastore.Prune`` is required on
|
||||||
|
the local datastore as well. If the ``owner`` option is not set (defaulting to
|
||||||
|
``root@pam``) or set to something other than the configuring user,
|
||||||
|
``Datastore.Modify`` is required as well.
|
||||||
|
|
||||||
|
.. note:: A sync job can only sync backup groups that the configured remote's
|
||||||
|
user/API token can read. If a remote is configured with a user/API token that
|
||||||
|
only has ``Datastore.Backup`` privileges, only the limited set of accessible
|
||||||
|
snapshots owned by that user/API token can be synced.
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
.. _sysadmin_network_configuration:
|
||||||
|
|
||||||
Network Management
|
Network Management
|
||||||
==================
|
==================
|
||||||
|
|
||||||
|
@ -26,11 +26,8 @@ update``.
|
|||||||
|
|
||||||
.. FIXME for 7.0: change security update suite to bullseye-security
|
.. FIXME for 7.0: change security update suite to bullseye-security
|
||||||
|
|
||||||
In addition, you need a package repository from Proxmox to get Proxmox Backup updates.
|
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
||||||
|
updates.
|
||||||
During the Proxmox Backup beta phase, only one repository (pbstest) will be
|
|
||||||
available. Once released, an Enterprise repository for production use and a
|
|
||||||
no-subscription repository will be provided.
|
|
||||||
|
|
||||||
SecureApt
|
SecureApt
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
@ -72,49 +69,45 @@ Here, the output should be:
|
|||||||
|
|
||||||
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
||||||
|
|
||||||
.. comment
|
`Proxmox Backup`_ Enterprise Repository
|
||||||
`Proxmox Backup`_ Enterprise Repository
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
This will be the default, stable, and recommended repository. It is available for
|
This will be the default, stable, and recommended repository. It is available for
|
||||||
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
|
||||||
and is suitable for production use. The ``pbs-enterprise`` repository is
|
and is suitable for production use. The ``pbs-enterprise`` repository is
|
||||||
enabled by default:
|
enabled by default:
|
||||||
|
|
||||||
.. note:: During the Proxmox Backup beta phase only one repository (pbstest)
|
.. code-block:: sources.list
|
||||||
will be available.
|
|
||||||
|
|
||||||
.. code-block:: sources.list
|
|
||||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||||
|
|
||||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
||||||
|
|
||||||
|
|
||||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||||
notified via email about new packages as soon as they are available. The
|
notified via email about new packages as soon as they are available. The
|
||||||
change-log and details of each package can be viewed in the GUI (if available).
|
change-log and details of each package can be viewed in the GUI (if available).
|
||||||
|
|
||||||
Please note that you need a valid subscription key to access this
|
Please note that you need a valid subscription key to access this
|
||||||
repository. More information regarding subscription levels and pricing can be
|
repository. More information regarding subscription levels and pricing can be
|
||||||
found at https://www.proxmox.com/en/proxmox-backup/pricing.
|
found at https://www.proxmox.com/en/proxmox-backup-server/pricing
|
||||||
|
|
||||||
.. note:: You can disable this repository by commenting out the above
|
.. note:: You can disable this repository by commenting out the above line
|
||||||
line using a `#` (at the start of the line). This prevents error
|
using a `#` (at the start of the line). This prevents error messages if you do
|
||||||
messages if you do not have a subscription key. Please configure the
|
not have a subscription key. Please configure the ``pbs-no-subscription``
|
||||||
``pbs-no-subscription`` repository in that case.
|
repository in that case.
|
||||||
|
|
||||||
|
|
||||||
`Proxmox Backup`_ No-Subscription Repository
|
`Proxmox Backup`_ No-Subscription Repository
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
As the name suggests, you do not need a subscription key to access
|
As the name suggests, you do not need a subscription key to access
|
||||||
this repository. It can be used for testing and non-production
|
this repository. It can be used for testing and non-production
|
||||||
use. It is not recommended to use it on production servers, because these
|
use. It is not recommended to use it on production servers, because these
|
||||||
packages are not always heavily tested and validated.
|
packages are not always heavily tested and validated.
|
||||||
|
|
||||||
We recommend to configure this repository in ``/etc/apt/sources.list``.
|
We recommend to configure this repository in ``/etc/apt/sources.list``.
|
||||||
|
|
||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list``
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
deb http://ftp.debian.org/debian buster main contrib
|
deb http://ftp.debian.org/debian buster main contrib
|
||||||
@ -128,12 +121,11 @@ Here, the output should be:
|
|||||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
deb http://security.debian.org/debian-security buster/updates main contrib
|
||||||
|
|
||||||
|
|
||||||
`Proxmox Backup`_ Beta Repository
|
`Proxmox Backup`_ Test Repository
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
During the public beta, there is a repository called ``pbstest``. This one
|
This repository contains the latest packages and is heavily used by developers
|
||||||
contains the latest packages and is heavily used by developers to test new
|
to test new features.
|
||||||
features.
|
|
||||||
|
|
||||||
.. .. warning:: the ``pbstest`` repository should (as the name implies)
|
.. .. warning:: the ``pbstest`` repository should (as the name implies)
|
||||||
only be used to test new features or bug fixes.
|
only be used to test new features or bug fixes.
|
||||||
@ -145,7 +137,3 @@ You can access this repository by adding the following line to
|
|||||||
:caption: sources.list entry for ``pbstest``
|
:caption: sources.list entry for ``pbstest``
|
||||||
|
|
||||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
deb http://download.proxmox.com/debian/pbs buster pbstest
|
||||||
|
|
||||||
If you installed Proxmox Backup Server from the official beta ISO, you should
|
|
||||||
have this repository already configured in
|
|
||||||
``/etc/apt/sources.list.d/pbstest-beta.list``
|
|
||||||
|
6
docs/pmtx/description.rst
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
Description
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
The ``pmtx`` command controls SCSI media changer devices (tape
|
||||||
|
autoloader).
|
||||||
|
|
28
docs/pmtx/man1.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
==========================
|
||||||
|
pmtx
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Control SCSI media changer devices (tape autoloaders)
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
==========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
Description
|
||||||
|
============
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
||||||
|
|
@ -5,7 +5,7 @@ proxmox-backup-client
|
|||||||
.. include:: ../epilog.rst
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
-------------------------------------------------------------
|
-------------------------------------------------------------
|
||||||
Command line toot for Backup and Restore
|
Command line tool for Backup and Restore
|
||||||
-------------------------------------------------------------
|
-------------------------------------------------------------
|
||||||
|
|
||||||
:Author: |AUTHOR|
|
:Author: |AUTHOR|
|
||||||
|
BIN
docs/prune-simulator/clear-trigger.png
Normal file
After Width: | Height: | Size: 11 KiB |
102
docs/prune-simulator/documentation.html
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<style>
|
||||||
|
/* similar to sphinx alabaster theme ones */
|
||||||
|
body {
|
||||||
|
max-width: 90ch;
|
||||||
|
margin-left: 2ch;
|
||||||
|
margin-right: 2ch;
|
||||||
|
line-height: 1.4em;
|
||||||
|
/* avoid the very high contrast of black on white, tone it down a bit */
|
||||||
|
color: #3E4349;
|
||||||
|
hyphens: auto;
|
||||||
|
text-align: left;
|
||||||
|
font-family: 'Open Sans', sans-serif;
|
||||||
|
font-size: 17px;
|
||||||
|
}
|
||||||
|
h1, h2, h3 {
|
||||||
|
font-family: Lato, sans-serif;
|
||||||
|
font-size: 150%;
|
||||||
|
line-height:1.2
|
||||||
|
}
|
||||||
|
tt, code {
|
||||||
|
background-color: #ecf0f3;
|
||||||
|
color: #222;
|
||||||
|
}
|
||||||
|
pre, tt, code {
|
||||||
|
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
|
||||||
|
font-size: 0.9em;
|
||||||
|
}
|
||||||
|
div.note {
|
||||||
|
background-color: #EEE;
|
||||||
|
border: 1px solid #CCC;
|
||||||
|
margin: 10px 0;
|
||||||
|
padding: 0px 20px;
|
||||||
|
}
|
||||||
|
p.note-title {
|
||||||
|
font-weight: bolder;
|
||||||
|
padding: 0;
|
||||||
|
margin: 10px 0 0 0;
|
||||||
|
}
|
||||||
|
div.note > p.last {
|
||||||
|
margin: 5px 0 10px 0;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<p>A simulator to experiment with different backup schedules and prune
|
||||||
|
options.</p>
|
||||||
|
|
||||||
|
<h3>Schedule</h3>
|
||||||
|
<p>Select weekdays with the combobox and input hour and minute
|
||||||
|
specification separated by a colon, i.e. <code>HOUR:MINUTE</code>. Each of
|
||||||
|
<code>HOUR</code> and <code>MINUTE</code> can be either a single value or
|
||||||
|
one of the following:</p>
|
||||||
|
<ul class="simple">
|
||||||
|
<li>a comma-separated list: e.g., <code>01,02,03</code></li>
|
||||||
|
<li>a range: e.g., <code>01..10</code></li>
|
||||||
|
<li>a repetition: e.g, <code>05/10</code> (means starting at <code>5</code> every <code>10</code>)</li>
|
||||||
|
<li>a combination of the above: e.g., <code>01,05..10,12/02</code></li>
|
||||||
|
<li>a <code>*</code> for every possible value</li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<h3>Pruning</h3>
|
||||||
|
<p>Prune lets you systematically delete older backups, retaining backups for
|
||||||
|
the last given number of time intervals. The following retention options are
|
||||||
|
available:</p>
|
||||||
|
<dl class="docutils">
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-last</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> backup snapshots.</dd>
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-hourly</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> hours. If there is more than one
|
||||||
|
backup for a single hour, only the latest is kept.</dd>
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-daily</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> days. If there is more than one
|
||||||
|
backup for a single day, only the latest is kept.</dd>
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-weekly</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> weeks. If there is more than one
|
||||||
|
backup for a single week, only the latest is kept.
|
||||||
|
<div class="last admonition note">
|
||||||
|
<p class="note-title">Note:</p>
|
||||||
|
<p class="last">Weeks start on Monday and end on Sunday. The software
|
||||||
|
uses the <a class="reference external" href="https://en.wikipedia.org/wiki/ISO_week_date">ISO week date</a> system and handles weeks at
|
||||||
|
the end of the year correctly.</p>
|
||||||
|
</div>
|
||||||
|
</dd>
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-monthly</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> months. If there is more than one
|
||||||
|
backup for a single month, only the latest is kept.</dd>
|
||||||
|
<dt><code class="docutils literal notranslate"><span class="pre">keep-yearly</span> <span class="pre"><N></span></code></dt>
|
||||||
|
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre"><N></span></code> years. If there is more than one
|
||||||
|
backup for a single year, only the latest is kept.</dd>
|
||||||
|
</dl>
|
||||||
|
<p>The retention options are processed in the order given above. Each option
|
||||||
|
only covers backups within its time period. The next option does not take care
|
||||||
|
of already covered backups. It will only consider older backups.</p>
|
||||||
|
<p>For example, in a week covered by <code>keep-weekly</code>, one backup is
|
||||||
|
kept while all others are removed; <code>keep-monthly</code> then does not
|
||||||
|
consider backups from that week anymore, even if part of the week is part of
|
||||||
|
an earlier month.</p>
|
||||||
|
</body>
|
||||||
|
</html>
|
45
docs/prune-simulator/index.html
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no">
|
||||||
|
<title>PBS Prune Simulator</title>
|
||||||
|
|
||||||
|
<link rel="stylesheet" type="text/css" href="extjs/theme-crisp/resources/theme-crisp-all.css">
|
||||||
|
<style>
|
||||||
|
.cal {
|
||||||
|
margin: 5px;
|
||||||
|
}
|
||||||
|
.cal-day {
|
||||||
|
vertical-align: top;
|
||||||
|
width: 150px;
|
||||||
|
height: 75px; /* this is like min-height when used in tables */
|
||||||
|
border: #939393 1px solid;
|
||||||
|
color: #454545;
|
||||||
|
}
|
||||||
|
.cal-day-date {
|
||||||
|
border-bottom: #444 1px solid;
|
||||||
|
color: #000;
|
||||||
|
}
|
||||||
|
.strikethrough {
|
||||||
|
text-decoration: line-through;
|
||||||
|
}
|
||||||
|
.black {
|
||||||
|
color: #000;
|
||||||
|
}
|
||||||
|
.sun {
|
||||||
|
background-color: #ededed;
|
||||||
|
}
|
||||||
|
.first-of-month {
|
||||||
|
border-right: dashed black 4px;
|
||||||
|
}
|
||||||
|
.clear-trigger {
|
||||||
|
background-image: url(./clear-trigger.png);
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
|
||||||
|
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||||
|
<script type="text/javascript" src="prune-simulator.js"></script>
|
||||||
|
</head>
|
||||||
|
<body></body>
|
||||||
|
</html>
|
788
docs/prune-simulator/prune-simulator.js
Normal file
@ -0,0 +1,788 @@
|
|||||||
|
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
||||||
|
if (Ext.isFirefox) {
|
||||||
|
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
||||||
|
}
|
||||||
|
|
||||||
|
Ext.onReady(function() {
|
||||||
|
const NOW = new Date();
|
||||||
|
const COLORS = {
|
||||||
|
'keep-last': 'orange',
|
||||||
|
'keep-hourly': 'purple',
|
||||||
|
'keep-daily': 'yellow',
|
||||||
|
'keep-weekly': 'green',
|
||||||
|
'keep-monthly': 'blue',
|
||||||
|
'keep-yearly': 'red',
|
||||||
|
'all zero': 'white',
|
||||||
|
};
|
||||||
|
const TEXT_COLORS = {
|
||||||
|
'keep-last': 'black',
|
||||||
|
'keep-hourly': 'white',
|
||||||
|
'keep-daily': 'black',
|
||||||
|
'keep-weekly': 'white',
|
||||||
|
'keep-monthly': 'white',
|
||||||
|
'keep-yearly': 'white',
|
||||||
|
'all zero': 'black',
|
||||||
|
};
|
||||||
|
|
||||||
|
Ext.define('PBS.prunesimulator.Documentation', {
|
||||||
|
extend: 'Ext.Panel',
|
||||||
|
alias: 'widget.prunesimulatorDocumentation',
|
||||||
|
|
||||||
|
html: '<iframe style="width:100%;height:100%;border:0px;" src="./documentation.html"/>',
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.prunesimulator.CalendarEvent', {
|
||||||
|
extend: 'Ext.form.field.ComboBox',
|
||||||
|
alias: 'widget.prunesimulatorCalendarEvent',
|
||||||
|
|
||||||
|
editable: true,
|
||||||
|
|
||||||
|
displayField: 'text',
|
||||||
|
valueField: 'value',
|
||||||
|
queryMode: 'local',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: ['value', 'text'],
|
||||||
|
data: [
|
||||||
|
{ value: '0/2:00', text: "Every two hours" },
|
||||||
|
{ value: '0/6:00', text: "Every six hours" },
|
||||||
|
{ value: '2,22:30', text: "At 02:30 and 22:30" },
|
||||||
|
{ value: '00:00', text: "At 00:00" },
|
||||||
|
{ value: '08..17:00/30', text: "From 08:00 to 17:30 every 30 minutes" },
|
||||||
|
{ value: 'HOUR:MINUTE', text: "Custom schedule" },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
|
||||||
|
tpl: [
|
||||||
|
'<ul class="x-list-plain"><tpl for=".">',
|
||||||
|
'<li role="option" class="x-boundlist-item">{text}</li>',
|
||||||
|
'</tpl></ul>',
|
||||||
|
],
|
||||||
|
|
||||||
|
displayTpl: [
|
||||||
|
'<tpl for=".">',
|
||||||
|
'{value}',
|
||||||
|
'</tpl>',
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.prunesimulator.DayOfWeekSelector', {
|
||||||
|
extend: 'Ext.form.field.ComboBox',
|
||||||
|
alias: 'widget.prunesimulatorDayOfWeekSelector',
|
||||||
|
|
||||||
|
editable: false,
|
||||||
|
|
||||||
|
displayField: 'text',
|
||||||
|
valueField: 'value',
|
||||||
|
queryMode: 'local',
|
||||||
|
|
||||||
|
store: {
|
||||||
|
field: ['value', 'text'],
|
||||||
|
data: [
|
||||||
|
{ value: 'mon', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[1]) },
|
||||||
|
{ value: 'tue', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[2]) },
|
||||||
|
{ value: 'wed', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[3]) },
|
||||||
|
{ value: 'thu', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[4]) },
|
||||||
|
{ value: 'fri', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[5]) },
|
||||||
|
{ value: 'sat', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[6]) },
|
||||||
|
{ value: 'sun', text: Ext.util.Format.htmlDecode(Ext.Date.dayNames[0]) },
|
||||||
|
],
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('pbs-prune-list', {
|
||||||
|
extend: 'Ext.data.Model',
|
||||||
|
fields: [
|
||||||
|
{
|
||||||
|
name: 'backuptime',
|
||||||
|
type: 'date',
|
||||||
|
dateFormat: 'timestamp',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'mark',
|
||||||
|
type: 'string',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: 'keepName',
|
||||||
|
type: 'string',
|
||||||
|
},
|
||||||
|
],
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.prunesimulator.PruneList', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.prunesimulatorPruneList',
|
||||||
|
|
||||||
|
initComponent: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
if (!me.store) {
|
||||||
|
throw "no store specified";
|
||||||
|
}
|
||||||
|
|
||||||
|
me.items = [
|
||||||
|
{
|
||||||
|
xtype: 'grid',
|
||||||
|
store: me.store,
|
||||||
|
border: false,
|
||||||
|
columns: [
|
||||||
|
{
|
||||||
|
header: 'Backup Time',
|
||||||
|
dataIndex: 'backuptime',
|
||||||
|
renderer: function(value, metaData, record) {
|
||||||
|
let text = Ext.Date.format(value, 'Y-m-d H:i:s');
|
||||||
|
if (record.data.mark === 'keep') {
|
||||||
|
if (me.useColors) {
|
||||||
|
let bgColor = COLORS[record.data.keepName];
|
||||||
|
let textColor = TEXT_COLORS[record.data.keepName];
|
||||||
|
return '<div style="background-color: ' + bgColor + '; ' +
|
||||||
|
'color: ' + textColor + ';">' + text + '</div>';
|
||||||
|
} else {
|
||||||
|
return text;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return '<div style="text-decoration: line-through;">' + text + '</div>';
|
||||||
|
}
|
||||||
|
},
|
||||||
|
flex: 1,
|
||||||
|
sortable: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
header: 'Keep (reason)',
|
||||||
|
dataIndex: 'mark',
|
||||||
|
renderer: function(value, metaData, record) {
|
||||||
|
if (record.data.mark === 'keep') {
|
||||||
|
if (record.data.keepCount) {
|
||||||
|
return 'keep (' + record.data.keepName +
|
||||||
|
': ' + record.data.keepCount + ')';
|
||||||
|
} else {
|
||||||
|
return 'keep (' + record.data.keepName + ')';
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
width: 200,
|
||||||
|
sortable: false,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
me.callParent();
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.prunesimulator.WeekTable', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.prunesimulatorWeekTable',
|
||||||
|
|
||||||
|
reload: function() {
|
||||||
|
let me = this;
|
||||||
|
let backups = me.store.data.items;
|
||||||
|
|
||||||
|
let html = '<table class="cal">';
|
||||||
|
|
||||||
|
let now = new Date(NOW.getTime());
|
||||||
|
let skip = 7 - parseInt(Ext.Date.format(now, 'N'), 10);
|
||||||
|
let tableStartDate = Ext.Date.add(now, Ext.Date.DAY, skip);
|
||||||
|
|
||||||
|
let bIndex = 0;
|
||||||
|
|
||||||
|
for (let i = 0; bIndex < backups.length; i++) {
|
||||||
|
html += '<tr>';
|
||||||
|
|
||||||
|
for (let j = 0; j < 7; j++) {
|
||||||
|
let date = Ext.Date.subtract(tableStartDate, Ext.Date.DAY, j + 7 * i);
|
||||||
|
let currentDay = Ext.Date.format(date, 'd/m/Y');
|
||||||
|
|
||||||
|
let dayOfWeekCls = Ext.Date.format(date, 'D').toLowerCase();
|
||||||
|
let firstOfMonthCls = Ext.Date.format(date, 'd') === '01'
|
||||||
|
? 'first-of-month'
|
||||||
|
: '';
|
||||||
|
html += `<td class="cal-day ${dayOfWeekCls} ${firstOfMonthCls}">`;
|
||||||
|
|
||||||
|
const isBackupOnDay = function(backup, day) {
|
||||||
|
return backup && Ext.Date.format(backup.data.backuptime, 'd/m/Y') === day;
|
||||||
|
};
|
||||||
|
|
||||||
|
let backup = backups[bIndex];
|
||||||
|
|
||||||
|
html += '<table><tr>';
|
||||||
|
html += `<th class="cal-day-date">${Ext.Date.format(date, 'D, d M Y')}</th>`;
|
||||||
|
|
||||||
|
while (isBackupOnDay(backup, currentDay)) {
|
||||||
|
html += '<tr><td>';
|
||||||
|
|
||||||
|
let text = Ext.Date.format(backup.data.backuptime, 'H:i');
|
||||||
|
if (backup.data.mark === 'remove') {
|
||||||
|
html += `<span class="strikethrough">${text}</span>`;
|
||||||
|
} else {
|
||||||
|
if (backup.data.keepCount) {
|
||||||
|
text += ` (${backup.data.keepName} ${backup.data.keepCount})`;
|
||||||
|
} else {
|
||||||
|
text += ` (${backup.data.keepName})`;
|
||||||
|
}
|
||||||
|
if (me.useColors) {
|
||||||
|
let bgColor = COLORS[backup.data.keepName];
|
||||||
|
let textColor = TEXT_COLORS[backup.data.keepName];
|
||||||
|
html += `<span style="background-color: ${bgColor};
|
||||||
|
color: ${textColor};">${text}</span>`;
|
||||||
|
} else {
|
||||||
|
html += `<span class="black">${text}</span>`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
html += '</td></tr>';
|
||||||
|
backup = backups[++bIndex];
|
||||||
|
}
|
||||||
|
html += '</table>';
|
||||||
|
html += '</div>';
|
||||||
|
html += '</td>';
|
||||||
|
}
|
||||||
|
|
||||||
|
html += '</tr>';
|
||||||
|
}
|
||||||
|
|
||||||
|
me.setHtml(html);
|
||||||
|
},
|
||||||
|
|
||||||
|
initComponent: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
if (!me.store) {
|
||||||
|
throw "no store specified";
|
||||||
|
}
|
||||||
|
|
||||||
|
let reload = function() {
|
||||||
|
me.reload();
|
||||||
|
};
|
||||||
|
|
||||||
|
me.store.on("datachanged", reload);
|
||||||
|
|
||||||
|
me.callParent();
|
||||||
|
|
||||||
|
me.reload();
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.PruneSimulatorKeepInput', {
|
||||||
|
extend: 'Ext.form.field.Number',
|
||||||
|
alias: 'widget.prunesimulatorKeepInput',
|
||||||
|
|
||||||
|
allowBlank: true,
|
||||||
|
fieldGroup: 'keep',
|
||||||
|
minValue: 1,
|
||||||
|
|
||||||
|
listeners: {
|
||||||
|
afterrender: function(field) {
|
||||||
|
this.triggers.clear.setVisible(field.value !== null);
|
||||||
|
},
|
||||||
|
change: function(field, newValue, oldValue) {
|
||||||
|
this.triggers.clear.setVisible(newValue !== null);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
triggers: {
|
||||||
|
clear: {
|
||||||
|
cls: 'clear-trigger',
|
||||||
|
weight: -1,
|
||||||
|
handler: function() {
|
||||||
|
this.triggers.clear.setVisible(false);
|
||||||
|
this.setValue(null);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
Ext.define('PBS.PruneSimulatorPanel', {
|
||||||
|
extend: 'Ext.panel.Panel',
|
||||||
|
alias: 'widget.prunesimulatorPanel',
|
||||||
|
|
||||||
|
viewModel: {
|
||||||
|
},
|
||||||
|
|
||||||
|
getValues: function() {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
let values = {};
|
||||||
|
|
||||||
|
Ext.Array.each(me.query('[isFormField]'), function(field) {
|
||||||
|
let data = field.getSubmitData();
|
||||||
|
Ext.Object.each(data, function(name, val) {
|
||||||
|
values[name] = val;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
return values;
|
||||||
|
},
|
||||||
|
|
||||||
|
controller: {
|
||||||
|
xclass: 'Ext.app.ViewController',
|
||||||
|
|
||||||
|
init: function(view) {
|
||||||
|
this.reloadFull(); // initial load
|
||||||
|
this.switchColor(true);
|
||||||
|
},
|
||||||
|
|
||||||
|
control: {
|
||||||
|
'field[fieldGroup=keep]': { change: 'reloadPrune' },
|
||||||
|
},
|
||||||
|
|
||||||
|
reloadFull: function() {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
|
||||||
|
let params = view.getValues();
|
||||||
|
|
||||||
|
let [hourSpec, minuteSpec] = params['schedule-time'].split(':');
|
||||||
|
|
||||||
|
if (!hourSpec || !minuteSpec) {
|
||||||
|
Ext.Msg.alert('Error', 'Invalid schedule');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let matchTimeSpec = function(timeSpec, rangeMin, rangeMax) {
|
||||||
|
let specValues = timeSpec.split(',');
|
||||||
|
let matches = {};
|
||||||
|
|
||||||
|
let assertValid = function(value) {
|
||||||
|
let num = Number(value);
|
||||||
|
if (isNaN(num)) {
|
||||||
|
throw value + " is not an integer";
|
||||||
|
} else if (value < rangeMin || value > rangeMax) {
|
||||||
|
throw "number '" + value + "' is not in the range '" + rangeMin + ".." + rangeMax + "'";
|
||||||
|
}
|
||||||
|
return num;
|
||||||
|
};
|
||||||
|
|
||||||
|
specValues.forEach(function(value) {
|
||||||
|
if (value.includes('..')) {
|
||||||
|
let [start, end] = value.split('..');
|
||||||
|
start = assertValid(start);
|
||||||
|
end = assertValid(end);
|
||||||
|
if (start > end) {
|
||||||
|
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
|
||||||
|
}
|
||||||
|
for (let i = start; i <= end; i++) {
|
||||||
|
matches[i] = 1;
|
||||||
|
}
|
||||||
|
} else if (value.includes('/')) {
|
||||||
|
let [start, step] = value.split('/');
|
||||||
|
start = assertValid(start);
|
||||||
|
step = assertValid(step);
|
||||||
|
for (let i = start; i <= rangeMax; i += step) {
|
||||||
|
matches[i] = 1;
|
||||||
|
}
|
||||||
|
} else if (value === '*') {
|
||||||
|
for (let i = rangeMin; i <= rangeMax; i++) {
|
||||||
|
matches[i] = 1;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
value = assertValid(value);
|
||||||
|
matches[value] = 1;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return Object.keys(matches);
|
||||||
|
};
|
||||||
|
|
||||||
|
let hours, minutes;
|
||||||
|
|
||||||
|
try {
|
||||||
|
hours = matchTimeSpec(hourSpec, 0, 23);
|
||||||
|
minutes = matchTimeSpec(minuteSpec, 0, 59);
|
||||||
|
} catch (err) {
|
||||||
|
Ext.Msg.alert('Error', err);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let backups = me.populateFromSchedule(
|
||||||
|
params['schedule-weekdays'],
|
||||||
|
hours,
|
||||||
|
minutes,
|
||||||
|
params.numberOfWeeks,
|
||||||
|
);
|
||||||
|
|
||||||
|
me.pruneSelect(backups, params);
|
||||||
|
|
||||||
|
view.pruneStore.setData(backups);
|
||||||
|
},
|
||||||
|
|
||||||
|
reloadPrune: function() {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
|
||||||
|
let params = view.getValues();
|
||||||
|
|
||||||
|
let backups = [];
|
||||||
|
view.pruneStore.getData().items.forEach(function(item) {
|
||||||
|
backups.push({
|
||||||
|
backuptime: item.data.backuptime,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
me.pruneSelect(backups, params);
|
||||||
|
|
||||||
|
view.pruneStore.setData(backups);
|
||||||
|
},
|
||||||
|
|
||||||
|
// backups are sorted descending by date
|
||||||
|
populateFromSchedule: function(weekdays, hours, minutes, weekCount) {
|
||||||
|
let weekdayFlags = [
|
||||||
|
weekdays.includes('sun'),
|
||||||
|
weekdays.includes('mon'),
|
||||||
|
weekdays.includes('tue'),
|
||||||
|
weekdays.includes('wed'),
|
||||||
|
weekdays.includes('thu'),
|
||||||
|
weekdays.includes('fri'),
|
||||||
|
weekdays.includes('sat'),
|
||||||
|
];
|
||||||
|
|
||||||
|
let todaysDate = new Date(NOW.getTime());
|
||||||
|
|
||||||
|
let timesOnSingleDay = [];
|
||||||
|
|
||||||
|
hours.forEach(function(hour) {
|
||||||
|
minutes.forEach(function(minute) {
|
||||||
|
todaysDate.setHours(hour);
|
||||||
|
todaysDate.setMinutes(minute);
|
||||||
|
timesOnSingleDay.push(todaysDate.getTime());
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// sort recent times first, backups array below is ordered now -> past
|
||||||
|
timesOnSingleDay.sort((a, b) => b - a);
|
||||||
|
|
||||||
|
let backups = [];
|
||||||
|
|
||||||
|
for (let i = 0; i < 7 * weekCount; i++) {
|
||||||
|
let daysDate = Ext.Date.subtract(todaysDate, Ext.Date.DAY, i);
|
||||||
|
let weekday = parseInt(Ext.Date.format(daysDate, 'w'), 10);
|
||||||
|
if (weekdayFlags[weekday]) {
|
||||||
|
timesOnSingleDay.forEach(function(time) {
|
||||||
|
backups.push({
|
||||||
|
backuptime: Ext.Date.subtract(new Date(time), Ext.Date.DAY, i),
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return backups;
|
||||||
|
},
|
||||||
|
|
||||||
|
pruneMark: function(backups, keepCount, keepName, idFunc) {
|
||||||
|
if (!keepCount) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let alreadyIncluded = {};
|
||||||
|
let newlyIncluded = {};
|
||||||
|
let newlyIncludedCount = 0;
|
||||||
|
|
||||||
|
let finished = false;
|
||||||
|
|
||||||
|
backups.forEach(function(backup) {
|
||||||
|
let mark = backup.mark;
|
||||||
|
if (mark && mark === 'keep') {
|
||||||
|
let id = idFunc(backup);
|
||||||
|
alreadyIncluded[id] = true;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
backups.forEach(function(backup) {
|
||||||
|
let mark = backup.mark;
|
||||||
|
let id = idFunc(backup);
|
||||||
|
|
||||||
|
if (finished || alreadyIncluded[id] || mark) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!newlyIncluded[id]) {
|
||||||
|
if (newlyIncludedCount >= keepCount) {
|
||||||
|
finished = true;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
newlyIncluded[id] = true;
|
||||||
|
newlyIncludedCount++;
|
||||||
|
backup.mark = 'keep';
|
||||||
|
backup.keepName = keepName;
|
||||||
|
backup.keepCount = newlyIncludedCount;
|
||||||
|
} else {
|
||||||
|
backup.mark = 'remove';
|
||||||
|
}
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
// backups need to be sorted descending by date
|
||||||
|
pruneSelect: function(backups, keepParams) {
|
||||||
|
let me = this;
|
||||||
|
|
||||||
|
if (Number(keepParams['keep-last']) +
|
||||||
|
Number(keepParams['keep-hourly']) +
|
||||||
|
Number(keepParams['keep-daily']) +
|
||||||
|
Number(keepParams['keep-weekly']) +
|
||||||
|
Number(keepParams['keep-monthly']) +
|
||||||
|
Number(keepParams['keep-yearly']) === 0) {
|
||||||
|
backups.forEach(function(backup) {
|
||||||
|
backup.mark = 'keep';
|
||||||
|
backup.keepName = 'keep-all';
|
||||||
|
});
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
me.pruneMark(backups, keepParams['keep-last'], 'keep-last', function(backup) {
|
||||||
|
return backup.backuptime;
|
||||||
|
});
|
||||||
|
me.pruneMark(backups, keepParams['keep-hourly'], 'keep-hourly', function(backup) {
|
||||||
|
return Ext.Date.format(backup.backuptime, 'H/d/m/Y');
|
||||||
|
});
|
||||||
|
me.pruneMark(backups, keepParams['keep-daily'], 'keep-daily', function(backup) {
|
||||||
|
return Ext.Date.format(backup.backuptime, 'd/m/Y');
|
||||||
|
});
|
||||||
|
me.pruneMark(backups, keepParams['keep-weekly'], 'keep-weekly', function(backup) {
|
||||||
|
// ISO-8601 week and week-based year
|
||||||
|
return Ext.Date.format(backup.backuptime, 'W/o');
|
||||||
|
});
|
||||||
|
me.pruneMark(backups, keepParams['keep-monthly'], 'keep-monthly', function(backup) {
|
||||||
|
return Ext.Date.format(backup.backuptime, 'm/Y');
|
||||||
|
});
|
||||||
|
me.pruneMark(backups, keepParams['keep-yearly'], 'keep-yearly', function(backup) {
|
||||||
|
return Ext.Date.format(backup.backuptime, 'Y');
|
||||||
|
});
|
||||||
|
|
||||||
|
backups.forEach(function(backup) {
|
||||||
|
backup.mark = backup.mark || 'remove';
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
|
toggleColors: function(checkbox, checked) {
|
||||||
|
this.switchColor(checked);
|
||||||
|
},
|
||||||
|
|
||||||
|
switchColor: function(useColors) {
|
||||||
|
let me = this;
|
||||||
|
let view = me.getView();
|
||||||
|
|
||||||
|
const getStyle = name =>
|
||||||
|
`background-color: ${COLORS[name]}; color: ${TEXT_COLORS[name]};`;
|
||||||
|
|
||||||
|
for (const field of view.query('[isFormField]')) {
|
||||||
|
if (field.fieldGroup !== 'keep') {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (useColors) {
|
||||||
|
field.setFieldStyle(getStyle(field.name));
|
||||||
|
} else {
|
||||||
|
field.setFieldStyle('background-color: white; color: #444;');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
me.lookup('weekTable').useColors = useColors;
|
||||||
|
me.lookup('pruneList').useColors = useColors;
|
||||||
|
|
||||||
|
me.reloadPrune();
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Form fields for the six keep-* prune options. Fields without an
// initial 'value' start empty; pruneSelect() treats an empty option
// as 0 (not configured).
keepItems: [
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-last',
        fieldLabel: 'keep-last',
        value: 4,
    },
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-hourly',
        fieldLabel: 'keep-hourly',
    },
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-daily',
        fieldLabel: 'keep-daily',
        value: 5,
    },
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-weekly',
        fieldLabel: 'keep-weekly',
        value: 2,
    },
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-monthly',
        fieldLabel: 'keep-monthly',
    },
    {
        xtype: 'prunesimulatorKeepInput',
        name: 'keep-yearly',
        fieldLabel: 'keep-yearly',
    },
],
|
||||||
|
|
||||||
|
initComponent: function() {
    // Build the simulator layout: view/schedule settings on top, then the
    // prune options + backup list row, then the calendar week table.
    var me = this;

    // Shared store holding the simulated backups, newest first. It is
    // displayed by both the prune list and the week table.
    me.pruneStore = Ext.create('Ext.data.Store', {
        model: 'pbs-prune-list',
        sorters: { property: 'backuptime', direction: 'DESC' },
    });

    me.items = [
        // Top row: display toggles (left) and schedule inputs (right).
        {
            xtype: 'panel',
            layout: {
                type: 'hbox',
                align: 'stretch',
            },
            border: false,
            items: [
                {
                    title: 'View',
                    layout: 'anchor',
                    flex: 1,
                    border: false,
                    bodyPadding: 10,
                    items: [
                        {
                            xtype: 'checkbox',
                            name: 'showCalendar',
                            reference: 'showCalendar',
                            fieldLabel: 'Show Calendar:',
                            checked: true,
                        },
                        {
                            xtype: 'checkbox',
                            name: 'showColors',
                            reference: 'showColors',
                            fieldLabel: 'Show Colors:',
                            checked: true,
                            handler: 'toggleColors',
                        },
                    ],
                },
                // Thin vertical separator between the two panels.
                { xtype: "panel", width: 1, border: 1 },
                {
                    xtype: 'form',
                    layout: 'anchor',
                    flex: 1,
                    border: false,
                    title: 'Simulated Backup Schedule',
                    defaults: {
                        labelWidth: 120,
                    },
                    bodyPadding: 10,
                    items: [
                        {
                            xtype: 'prunesimulatorDayOfWeekSelector',
                            name: 'schedule-weekdays',
                            fieldLabel: 'Day of week',
                            value: ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'],
                            allowBlank: false,
                            multiSelect: true,
                            padding: '0 0 0 10',
                        },
                        {
                            xtype: 'prunesimulatorCalendarEvent',
                            name: 'schedule-time',
                            allowBlank: false,
                            value: '0/6:00',
                            fieldLabel: 'Backup schedule',
                            padding: '0 0 0 10',
                        },
                        {
                            xtype: 'numberfield',
                            name: 'numberOfWeeks',
                            allowBlank: false,
                            fieldLabel: 'Number of weeks',
                            minValue: 1,
                            value: 15,
                            maxValue: 260, // five years
                            padding: '0 0 0 10',
                        },
                        {
                            xtype: 'button',
                            name: 'schedule-button',
                            text: 'Update Schedule',
                            formBind: true,
                            handler: 'reloadFull',
                        },
                    ],
                },
            ],
        },
        // Middle row: keep-* option inputs next to the resulting backup list.
        {
            xtype: 'panel',
            layout: {
                type: 'hbox',
                align: 'stretch',
            },
            flex: 1,
            border: false,
            items: [
                {
                    layout: 'anchor',
                    title: 'Prune Options',
                    border: false,
                    bodyPadding: 10,
                    scrollable: true,
                    items: me.keepItems,
                    flex: 1,
                },
                // Thin vertical separator between the two panels.
                { xtype: "panel", width: 1, border: 1 },
                {
                    layout: 'fit',
                    title: 'Backups',
                    border: false,
                    xtype: 'prunesimulatorPruneList',
                    store: me.pruneStore,
                    reference: 'pruneList',
                    flex: 1,
                },
            ],
        },
        // Bottom: calendar view, hidden via the 'showCalendar' checkbox bind.
        {
            layout: 'anchor',
            title: 'Calendar',
            autoScroll: true,
            flex: 2,
            xtype: 'prunesimulatorWeekTable',
            reference: 'weekTable',
            store: me.pruneStore,
            bind: {
                hidden: '{!showCalendar.checked}',
            },
        },
    ];

    me.callParent();
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Top-level page layout: the simulator panel on the left, the usage
// documentation on the right.
Ext.create('Ext.container.Viewport', {
    layout: 'border',
    renderTo: Ext.getBody(),
    items: [
        {
            xtype: 'prunesimulatorPanel',
            title: 'Proxmox Backup Server - Prune Simulator',
            region: 'west',
            layout: {
                type: 'vbox',
                align: 'stretch',
                pack: 'start',
            },
            flex: 3,
            maxWidth: 1090,
        },
        {
            xtype: 'prunesimulatorDocumentation',
            title: 'Usage',
            border: false,
            flex: 2,
            region: 'center',
        },
    ],
});
|
||||||
|
});
|
||||||
|
|
@ -1,6 +1,8 @@
|
|||||||
Storage
|
Storage
|
||||||
=======
|
=======
|
||||||
|
|
||||||
|
.. _storage_disk_management:
|
||||||
|
|
||||||
Disk Management
|
Disk Management
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
@ -57,7 +59,7 @@ create a datastore at the location ``/mnt/datastore/store1``:
|
|||||||
You can also create a ``zpool`` with various raid levels from **Administration
|
You can also create a ``zpool`` with various raid levels from **Administration
|
||||||
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
-> Disks -> Zpool** in the web interface, or by using ``zpool create``. The command
|
||||||
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
|
||||||
mounts it on the root directory (default):
|
mounts it under ``/mnt/datastore/zpool1``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -85,7 +87,7 @@ display S.M.A.R.T. attributes from the web interface or by using the command:
|
|||||||
|
|
||||||
.. _datastore_intro:
|
.. _datastore_intro:
|
||||||
|
|
||||||
:term:`DataStore`
|
:term:`Datastore`
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
A datastore refers to a location at which backups are stored. The current
|
A datastore refers to a location at which backups are stored. The current
|
||||||
@ -107,7 +109,7 @@ is stored in the file ``/etc/proxmox-backup/datastore.cfg``.
|
|||||||
Datastore Configuration
|
Datastore Configuration
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-datastore.png
|
.. image:: images/screenshots/pbs-gui-datastore-content.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Datastore Overview
|
:alt: Datastore Overview
|
||||||
|
|
||||||
@ -121,14 +123,17 @@ number of backups to keep in that store. :ref:`backup-pruning` and
|
|||||||
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
periodically based on a configured schedule (see :ref:`calendar-events`) per datastore.
|
||||||
|
|
||||||
|
|
||||||
|
.. _storage_datastore_create:
|
||||||
|
|
||||||
Creating a Datastore
|
Creating a Datastore
|
||||||
^^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^^
|
||||||
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
|
.. image:: images/screenshots/pbs-gui-datastore-create-general.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Create a datastore
|
:alt: Create a datastore
|
||||||
|
|
||||||
You can create a new datastore from the web GUI, by navigating to **Datastore** in
|
You can create a new datastore from the web interface, by clicking **Add
|
||||||
the menu tree and clicking **Create**. Here:
|
Datastore** in the side menu, under the **Datastore** section. In the setup
|
||||||
|
window:
|
||||||
|
|
||||||
* *Name* refers to the name of the datastore
|
* *Name* refers to the name of the datastore
|
||||||
* *Backing Path* is the path to the directory upon which you want to create the
|
* *Backing Path* is the path to the directory upon which you want to create the
|
||||||
@ -136,7 +141,9 @@ the menu tree and clicking **Create**. Here:
|
|||||||
* *GC Schedule* refers to the time and intervals at which garbage collection
|
* *GC Schedule* refers to the time and intervals at which garbage collection
|
||||||
runs
|
runs
|
||||||
* *Prune Schedule* refers to the frequency at which pruning takes place
|
* *Prune Schedule* refers to the frequency at which pruning takes place
|
||||||
* *Prune Options* set the amount of backups which you would like to keep (see :ref:`backup-pruning`).
|
* *Prune Options* set the amount of backups which you would like to keep (see
|
||||||
|
:ref:`backup-pruning`).
|
||||||
|
* *Comment* can be used to add some contextual information to the datastore.
|
||||||
|
|
||||||
Alternatively you can create a new datastore from the command line. The
|
Alternatively you can create a new datastore from the command line. The
|
||||||
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
following command creates a new datastore called ``store1`` on :file:`/backup/disk1/store1`
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
.. _sysadmin_host_administration:
|
||||||
|
|
||||||
Host System Administration
|
Host System Administration
|
||||||
==========================
|
==========================
|
||||||
|
|
||||||
|
732
docs/tape-backup.rst
Normal file
@ -0,0 +1,732 @@
|
|||||||
|
Tape Backup
|
||||||
|
===========
|
||||||
|
|
||||||
|
Proxmox tape backup provides an easy way to store datastore content
|
||||||
|
onto magnetic tapes. This increases data safety because you get:
|
||||||
|
|
||||||
|
- an additional copy of the data
|
||||||
|
- to a different media type (tape)
|
||||||
|
- to an additional location (you can move tapes offsite)
|
||||||
|
|
||||||
|
In most restore jobs, only data from the last backup job is restored.
|
||||||
|
Restore requests further decline the older the data
|
||||||
|
gets. Considering this, tape backup may also help to reduce disk
|
||||||
|
usage, because you can safely remove data from disk once archived on
|
||||||
|
tape. This is especially true if you need to keep data for several
|
||||||
|
years.
|
||||||
|
|
||||||
|
Tape backups do not provide random access to the stored data. Instead,
|
||||||
|
you need to restore the data to disk before you can access it
|
||||||
|
again. Also, if you store your tapes offsite (using some kind of tape
|
||||||
|
vaulting service), you need to bring them onsite before you can do any
|
||||||
|
restore. So please consider that restores from tapes can take much
|
||||||
|
longer than restores from disk.
|
||||||
|
|
||||||
|
|
||||||
|
Tape Technology Primer
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
.. _Linear Tape Open: https://en.wikipedia.org/wiki/Linear_Tape-Open
|
||||||
|
|
||||||
|
As of 2021, the only broadly available tape technology standard is
|
||||||
|
`Linear Tape Open`_, and different vendors offer LTO Ultrium tape
|
||||||
|
drives, autoloaders and LTO tape cartridges.
|
||||||
|
|
||||||
|
There are a few vendors offering proprietary drives with
|
||||||
|
slight advantages in performance and capacity, but they have
|
||||||
|
significant disadvantages:
|
||||||
|
|
||||||
|
- proprietary (single vendor)
|
||||||
|
- a much higher purchase cost
|
||||||
|
|
||||||
|
So we currently do not test such drives.
|
||||||
|
|
||||||
|
In general, LTO tapes offer the following advantages:
|
||||||
|
|
||||||
|
- Durable (30 years)
|
||||||
|
- High Capacity (12 TB)
|
||||||
|
- Relatively low cost per TB
|
||||||
|
- Cold Media
|
||||||
|
- Movable (storable inside vault)
|
||||||
|
- Multiple vendors (for both media and drives)
|
||||||
|
- Built-in AES-GCM encryption engine
|
||||||
|
|
||||||
|
Please note that `Proxmox Backup Server` already stores compressed
|
||||||
|
data, so we do not need/use the tape compression feature.
|
||||||
|
|
||||||
|
|
||||||
|
Supported Hardware
|
||||||
|
------------------
|
||||||
|
|
||||||
|
Proxmox Backup Server supports `Linear Tape Open`_ generation 4 (LTO4)
|
||||||
|
or later. In general, all SCSI2 tape drives supported by the Linux
|
||||||
|
kernel should work, but features like hardware encryption need LTO4
|
||||||
|
or later.
|
||||||
|
|
||||||
|
Tape changer support is done using the Linux 'mtx' command line
|
||||||
|
tool. So any changer device supported by that tool should work.
|
||||||
|
|
||||||
|
|
||||||
|
Drive Performance
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Current LTO-8 tapes provide read/write speeds up to 360MB/s. This means,
|
||||||
|
that it still takes a minimum of 9 hours to completely write or
|
||||||
|
read a single tape (even at maximum speed).
|
||||||
|
|
||||||
|
The only way to speed up that data rate is to use more than one
|
||||||
|
drive. That way you can run several backup jobs in parallel, or run
|
||||||
|
restore jobs while the other drives are used for backups.
|
||||||
|
|
||||||
|
Also consider that you need to read data first from your datastore
|
||||||
|
(disk). But a single spinning disk is unable to deliver data at this
|
||||||
|
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
||||||
|
so it takes 33 hours to read 12TB to fill up an LTO-8 tape. If you want
|
||||||
|
to run your tape at full speed, please make sure that the source
|
||||||
|
datastore is able to deliver that performance (e.g, by using SSDs).
|
||||||
|
|
||||||
|
|
||||||
|
Terminology
|
||||||
|
-----------
|
||||||
|
|
||||||
|
:Tape Labels: are used to uniquely identify a tape. You normally use
|
||||||
|
some sticky paper labels and apply them on the front of the
|
||||||
|
cartridge. We additionally store the label text magnetically on the
|
||||||
|
tape (first file on tape).
|
||||||
|
|
||||||
|
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
||||||
|
|
||||||
|
.. _LTO Ultrium Cartridge Label Specification: https://www.ibm.com/support/pages/ibm-lto-ultrium-cartridge-label-specification
|
||||||
|
|
||||||
|
.. _LTO Barcode Generator: lto-barcode/index.html
|
||||||
|
|
||||||
|
:Barcodes: are a special form of tape labels, which are electronically
|
||||||
|
readable. Most LTO tape robots use an 8 character string encoded as
|
||||||
|
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||||
|
Specification`_.
|
||||||
|
|
||||||
|
You can either buy such barcode labels from your cartridge vendor,
|
||||||
|
or print them yourself. You can use our `LTO Barcode Generator`_ App
|
||||||
|
for that.
|
||||||
|
|
||||||
|
.. Note:: Physical labels and the associated adhesive shall have an
|
||||||
|
environmental performance to match or exceed the environmental
|
||||||
|
specifications of the cartridge to which it is applied.
|
||||||
|
|
||||||
|
:Media Pools: A media pool is a logical container for tapes. A backup
|
||||||
|
job targets one media pool, so a job only uses tapes from that
|
||||||
|
pool. The pool additionally defines how long a backup job can
|
||||||
|
append data to tapes (allocation policy) and how long you want to
|
||||||
|
keep the data (retention policy).
|
||||||
|
|
||||||
|
:Media Set: A group of continuously written tapes (all from the same
|
||||||
|
media pool).
|
||||||
|
|
||||||
|
:Tape drive: The device used to read and write data to the tape. There
|
||||||
|
are standalone drives, but drives often ship within tape libraries.
|
||||||
|
|
||||||
|
:Tape changer: A device which can change the tapes inside a tape drive
|
||||||
|
(tape robot). They are usually part of a tape library.
|
||||||
|
|
||||||
|
.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library
|
||||||
|
|
||||||
|
:`Tape library`_: A storage device that contains one or more tape drives,
|
||||||
|
a number of slots to hold tape cartridges, a barcode reader to
|
||||||
|
identify tape cartridges and an automated method for loading tapes
|
||||||
|
(a robot).
|
||||||
|
|
||||||
|
People also call this 'autoloader', 'tape robot' or 'tape jukebox'.
|
||||||
|
|
||||||
|
:Inventory: The inventory stores the list of known tapes (with
|
||||||
|
additional status information).
|
||||||
|
|
||||||
|
:Catalog: A media catalog stores information about the media content.
|
||||||
|
|
||||||
|
|
||||||
|
Tape Quickstart
|
||||||
|
---------------
|
||||||
|
|
||||||
|
1. Configure your tape hardware (drives and changers)
|
||||||
|
|
||||||
|
2. Configure one or more media pools
|
||||||
|
|
||||||
|
3. Label your tape cartridges.
|
||||||
|
|
||||||
|
4. Start your first tape backup job ...
|
||||||
|
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Please note that you can configure anything using the graphical user
|
||||||
|
interface or the command line interface. Both methods result in the
|
||||||
|
same configuration.
|
||||||
|
|
||||||
|
|
||||||
|
Tape changers
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Tape changers (robots) are part of a `Tape Library`_. You can skip
|
||||||
|
this step if you are using a standalone drive.
|
||||||
|
|
||||||
|
Linux is able to auto detect those devices, and you can get a list
|
||||||
|
of available devices using::
|
||||||
|
|
||||||
|
# proxmox-tape changer scan
|
||||||
|
┌─────────────────────────────┬─────────┬──────────────┬────────┐
|
||||||
|
│ path │ vendor │ model │ serial │
|
||||||
|
╞═════════════════════════════╪═════════╪══════════════╪════════╡
|
||||||
|
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
||||||
|
└─────────────────────────────┴─────────┴──────────────┴────────┘
|
||||||
|
|
||||||
|
In order to use that device with Proxmox, you need to create a
|
||||||
|
configuration entry::
|
||||||
|
|
||||||
|
# proxmox-tape changer create sl3 --path /dev/tape/by-id/scsi-CC2C52
|
||||||
|
|
||||||
|
Where ``sl3`` is an arbitrary name you can choose.
|
||||||
|
|
||||||
|
.. Note:: Please use stable device path names from inside
|
||||||
|
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||||
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
|
You can show the final configuration with::
|
||||||
|
|
||||||
|
# proxmox-tape changer config sl3
|
||||||
|
┌──────┬─────────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞══════╪═════════════════════════════╡
|
||||||
|
│ name │ sl3 │
|
||||||
|
├──────┼─────────────────────────────┤
|
||||||
|
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
||||||
|
└──────┴─────────────────────────────┘
|
||||||
|
|
||||||
|
Or simply list all configured changer devices::
|
||||||
|
|
||||||
|
# proxmox-tape changer list
|
||||||
|
┌──────┬─────────────────────────────┬─────────┬──────────────┬────────────┐
|
||||||
|
│ name │ path │ vendor │ model │ serial │
|
||||||
|
╞══════╪═════════════════════════════╪═════════╪══════════════╪════════════╡
|
||||||
|
│ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
||||||
|
└──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘
|
||||||
|
|
||||||
|
The Vendor, Model and Serial number are auto detected, but only shown
|
||||||
|
if the device is online.
|
||||||
|
|
||||||
|
To test your setup, please query the status of the changer device with::
|
||||||
|
|
||||||
|
# proxmox-tape changer status sl3
|
||||||
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
|
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
|
||||||
|
╞═══════════════╪══════════╪════════════╪═════════════╡
|
||||||
|
│ drive │ 0 │ vtape1 │ 1 │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 1 │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 2 │ vtape2 │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ ... │ ... │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 16 │ │ │
|
||||||
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
|
Tape libraries usually provide some special import/export slots (also
|
||||||
|
called "mail slots"). Tapes inside those slots are accessible from
|
||||||
|
outside, making it easy to add/remove tapes to/from the library. Those
|
||||||
|
tapes are considered to be "offline", so backup jobs will not use
|
||||||
|
them. Those special slots are auto-detected and marked as
|
||||||
|
``import-export`` slot in the status command.
|
||||||
|
|
||||||
|
It's worth noting that some of the smaller tape libraries don't have
|
||||||
|
such slots. While they have something called "Mail Slot", that slot
|
||||||
|
is just a way to grab the tape from the gripper. But they are unable
|
||||||
|
to hold media while the robot does other things. They also do not
|
||||||
|
expose that "Mail Slot" over the SCSI interface, so you won't see them in
|
||||||
|
the status output.
|
||||||
|
|
||||||
|
As a workaround, you can mark some of the normal slots as export
|
||||||
|
slot. The software treats those slots like real ``import-export``
|
||||||
|
slots, and the media inside those slots is considered to be 'offline'
|
||||||
|
(not available for backup)::
|
||||||
|
|
||||||
|
# proxmox-tape changer update sl3 --export-slots 15,16
|
||||||
|
|
||||||
|
After that, you can see those artificial ``import-export`` slots in
|
||||||
|
the status output::
|
||||||
|
|
||||||
|
# proxmox-tape changer status sl3
|
||||||
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
|
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
|
||||||
|
╞═══════════════╪══════════╪════════════╪═════════════╡
|
||||||
|
│ drive │ 0 │ vtape1 │ 1 │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ import-export │ 15 │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ import-export │ 16 │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 1 │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 2 │ vtape2 │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ ... │ ... │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 14 │ │ │
|
||||||
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
|
|
||||||
|
Tape drives
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
Linux is able to auto detect tape drives, and you can get a list
|
||||||
|
of available tape drives using::
|
||||||
|
|
||||||
|
# proxmox-tape drive scan
|
||||||
|
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
||||||
|
│ path │ vendor │ model │ serial │
|
||||||
|
╞════════════════════════════════╪════════╪═════════════╪════════╡
|
||||||
|
│ /dev/tape/by-id/scsi-12345-nst │ IBM │ ULT3580-TD4 │ 12345 │
|
||||||
|
└────────────────────────────────┴────────┴─────────────┴────────┘
|
||||||
|
|
||||||
|
In order to use that drive with Proxmox, you need to create a
|
||||||
|
configuration entry::
|
||||||
|
|
||||||
|
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
|
||||||
|
|
||||||
|
.. Note:: Please use stable device path names from inside
|
||||||
|
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
|
||||||
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
|
If you have a tape library, you also need to set the associated
|
||||||
|
changer device::
|
||||||
|
|
||||||
|
# proxmox-tape drive update mydrive --changer sl3 --changer-drivenum 0
|
||||||
|
|
||||||
|
The ``--changer-drivenum`` is only necessary if the tape library
|
||||||
|
includes more than one drive (The changer status command lists all
|
||||||
|
drivenums).
|
||||||
|
|
||||||
|
You can show the final configuration with::
|
||||||
|
|
||||||
|
# proxmox-tape drive config mydrive
|
||||||
|
┌─────────┬────────────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞═════════╪════════════════════════════════╡
|
||||||
|
│ name │ mydrive │
|
||||||
|
├─────────┼────────────────────────────────┤
|
||||||
|
│ path │ /dev/tape/by-id/scsi-12345-nst │
|
||||||
|
├─────────┼────────────────────────────────┤
|
||||||
|
│ changer │ sl3 │
|
||||||
|
└─────────┴────────────────────────────────┘
|
||||||
|
|
||||||
|
.. NOTE:: The ``changer-drivenum`` value 0 is not stored in the
|
||||||
|
configuration, because that is the default.
|
||||||
|
|
||||||
|
To list all configured drives use::
|
||||||
|
|
||||||
|
# proxmox-tape drive list
|
||||||
|
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
||||||
|
│ name │ path │ changer │ vendor │ model │ serial │
|
||||||
|
╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
|
||||||
|
│ mydrive │ /dev/tape/by-id/scsi-12345-nst │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
|
||||||
|
└──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘
|
||||||
|
|
||||||
|
The Vendor, Model and Serial number are auto detected, but only shown
|
||||||
|
if the device is online.
|
||||||
|
|
||||||
|
For testing, you can simply query the drive status with::
|
||||||
|
|
||||||
|
# proxmox-tape status --drive mydrive
|
||||||
|
┌───────────┬────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞═══════════╪════════════════════════╡
|
||||||
|
│ blocksize │ 0 │
|
||||||
|
├───────────┼────────────────────────┤
|
||||||
|
│ status │ DRIVE_OPEN | IM_REP_EN │
|
||||||
|
└───────────┴────────────────────────┘
|
||||||
|
|
||||||
|
.. NOTE:: Blocksize should always be 0 (variable block size
|
||||||
|
mode). This is the default anyways.
|
||||||
|
|
||||||
|
|
||||||
|
Media Pools
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
A media pool is a logical container for tapes. A backup job targets
|
||||||
|
one media pool, so a job only uses tapes from that pool.
|
||||||
|
|
||||||
|
.. topic:: Media Set
|
||||||
|
|
||||||
|
A media set is a group of continuously written tapes, used to split
|
||||||
|
the larger pool into smaller, restorable units. One or more backup
|
||||||
|
jobs write to a media set, producing an ordered group of
|
||||||
|
tapes. Media sets are identified by a unique ID. That ID and the
|
||||||
|
sequence number are stored on each tape of that set (tape label).
|
||||||
|
|
||||||
|
Media sets are the basic unit for restore tasks, i.e. you need all
|
||||||
|
tapes in the set to restore the media set content. Data is fully
|
||||||
|
deduplicated inside a media set.
|
||||||
|
|
||||||
|
|
||||||
|
.. topic:: Media Set Allocation Policy
|
||||||
|
|
||||||
|
The pool additionally defines how long backup jobs can append data
|
||||||
|
to a media set. The following settings are possible:
|
||||||
|
|
||||||
|
- Try to use the current media set.
|
||||||
|
|
||||||
|
This setting produces one large media set. While this is very
|
||||||
|
space efficient (deduplication, no unused space), it can lead to
|
||||||
|
long restore times, because restore jobs need to read all tapes in the
|
||||||
|
set.
|
||||||
|
|
||||||
|
.. NOTE:: Data is fully deduplicated inside a media set. That
|
||||||
|
also means that data is randomly distributed over the tapes in
|
||||||
|
the set. So even if you restore a single VM, this may have to
|
||||||
|
read data from all tapes inside the media set.
|
||||||
|
|
||||||
|
Larger media sets are also more error prone, because a single
|
||||||
|
damaged media makes the restore fail.
|
||||||
|
|
||||||
|
Usage scenario: Mostly used with tape libraries, and you manually
|
||||||
|
trigger new set creation by running a backup job with the
|
||||||
|
``--export`` option.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts with the existence of a newer
|
||||||
|
media set.
|
||||||
|
|
||||||
|
- Always create a new media set.
|
||||||
|
|
||||||
|
With this setting each backup job creates a new media set. This
|
||||||
|
is less space efficient, because the last media from the last set
|
||||||
|
may not be fully written, leaving the remaining space unused.
|
||||||
|
|
||||||
|
The advantage is that this produces media sets of minimal
|
||||||
|
size. Small sets are easier to handle, you can move sets to an
|
||||||
|
off-site vault, and restore is much faster.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts with the creation time of the
|
||||||
|
media set.
|
||||||
|
|
||||||
|
- Create a new set when the specified Calendar Event triggers.
|
||||||
|
|
||||||
|
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
|
||||||
|
|
||||||
|
This allows you to specify points in time by using systemd like
|
||||||
|
Calendar Event specifications (see `systemd.time manpage`_).
|
||||||
|
|
||||||
|
For example, the value ``weekly`` (or ``Mon *-*-* 00:00:00``)
|
||||||
|
will create a new set each week.
|
||||||
|
|
||||||
|
This balances space efficiency against media count.
|
||||||
|
|
||||||
|
.. NOTE:: Retention period starts when the calendar event
|
||||||
|
triggers.
|
||||||
|
|
||||||
|
Additionally, the following events may allocate a new media set:
|
||||||
|
|
||||||
|
- Required tape is offline (and you use a tape library).
|
||||||
|
|
||||||
|
- Current set contains damaged or retired tapes.
|
||||||
|
|
||||||
|
- Media pool encryption changed
|
||||||
|
|
||||||
|
- Database consistency errors, e.g. if the inventory does not
|
||||||
|
contain required media info, or contains conflicting info
|
||||||
|
(outdated data).
|
||||||
|
|
||||||
|
.. topic:: Retention Policy
|
||||||
|
|
||||||
|
Defines how long we want to keep the data.
|
||||||
|
|
||||||
|
- Always overwrite media.
|
||||||
|
|
||||||
|
- Protect data for the duration specified.
|
||||||
|
|
||||||
|
We use systemd like time spans to specify durations, e.g. ``2
|
||||||
|
weeks`` (see `systemd.time manpage`_).
|
||||||
|
|
||||||
|
- Never overwrite data.
|
||||||
|
|
||||||
|
.. topic:: Hardware Encryption
|
||||||
|
|
||||||
|
LTO4 (or later) tape drives support hardware encryption. If you
|
||||||
|
configure the media pool to use encryption, all data written to the
|
||||||
|
tapes is encrypted using the configured key.
|
||||||
|
|
||||||
|
That way, unauthorized users cannot read data from the media,
|
||||||
|
e.g. if you lose a media while shipping it to an offsite location.
|
||||||
|
|
||||||
|
.. Note:: If the backup client also encrypts data, data on tape
|
||||||
|
will be double encrypted.
|
||||||
|
|
||||||
|
The password protected key is stored on each media, so it is
|
||||||
|
possible to `restore the key <restore_encryption_key_>`_ using the password. Please make sure
|
||||||
|
you remember the password in case you need to restore the key.
|
||||||
|
|
||||||
|
|
||||||
|
.. NOTE:: FIXME: Add note about global content namespace. (We do not store
|
||||||
|
the source datastore, so it is impossible to distinguish
|
||||||
|
store1:/vm/100 from store2:/vm/100. Please use different media
|
||||||
|
pools if the source is from a different name space)
|
||||||
|
|
||||||
|
|
||||||
|
The following command creates a new media pool::
|
||||||
|
|
||||||
|
// proxmox-tape pool create <name> --drive <string> [OPTIONS]
|
||||||
|
|
||||||
|
# proxmox-tape pool create daily --drive mydrive
|
||||||
|
|
||||||
|
|
||||||
|
Additional options can be set later using the update command::
|
||||||
|
|
||||||
|
# proxmox-tape pool update daily --allocation daily --retention 7days
|
||||||
|
|
||||||
|
|
||||||
|
To list all configured pools use::
|
||||||
|
|
||||||
|
# proxmox-tape pool list
|
||||||
|
┌───────┬──────────┬────────────┬───────────┬──────────┐
|
||||||
|
│ name │ drive │ allocation │ retention │ template │
|
||||||
|
╞═══════╪══════════╪════════════╪═══════════╪══════════╡
|
||||||
|
│ daily │ mydrive │ daily │ 7days │ │
|
||||||
|
└───────┴──────────┴────────────┴───────────┴──────────┘
|
||||||
|
|
||||||
|
|
||||||
|
Tape Jobs
|
||||||
|
~~~~~~~~~
|
||||||
|
|
||||||
|
|
||||||
|
Administration
|
||||||
|
--------------
|
||||||
|
|
||||||
|
Many sub-commands of the ``proxmox-tape`` command line tool take a
|
||||||
|
parameter called ``--drive``, which specifies the tape drive you want
|
||||||
|
to work on. For convenience, you can set that in an environment
|
||||||
|
variable::
|
||||||
|
|
||||||
|
# export PROXMOX_TAPE_DRIVE=mydrive
|
||||||
|
|
||||||
|
You can then omit the ``--drive`` parameter from the command. If the
|
||||||
|
drive has an associated changer device, you may also omit the changer
|
||||||
|
parameter from commands that need a changer device, for example::
|
||||||
|
|
||||||
|
# proxmox-tape changer status
|
||||||
|
|
||||||
|
This should display the changer status of the changer device associated with
|
||||||
|
drive ``mydrive``.
|
||||||
|
|
||||||
|
|
||||||
|
Label Tapes
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
By default, tape cartridges all look the same, so you need to put a
|
||||||
|
label on them for unique identification. So first, put a sticky paper
|
||||||
|
label with some human readable text on the cartridge.
|
||||||
|
|
||||||
|
If you use a `Tape Library`_, you should use an 8 character string
|
||||||
|
encoded as `Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||||
|
Specification`_. You can either buy such barcode labels from your
|
||||||
|
cartridge vendor, or print them yourself. You can use our `LTO Barcode
|
||||||
|
Generator`_ App for that.
|
||||||
|
|
||||||
|
Next, you need to write that same label text to the tape, so that the
|
||||||
|
software can uniquely identify the tape too.
|
||||||
|
|
||||||
|
For a standalone drive, manually insert the new tape cartridge into the
|
||||||
|
drive and run::
|
||||||
|
|
||||||
|
# proxmox-tape label --changer-id <label-text> [--pool <pool-name>]
|
||||||
|
|
||||||
|
You may omit the ``--pool`` argument to allow the tape to be used by any pool.
|
||||||
|
|
||||||
|
.. Note:: For safety reasons, this command fails if the tape contains
|
||||||
|
   any data. If you want to overwrite it anyway, erase the tape first.
|
||||||
|
|
||||||
|
You can verify success by reading back the label::
|
||||||
|
|
||||||
|
# proxmox-tape read-label
|
||||||
|
┌─────────────────┬──────────────────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞═════════════════╪══════════════════════════════════════╡
|
||||||
|
│ changer-id │ vtape1 │
|
||||||
|
├─────────────────┼──────────────────────────────────────┤
|
||||||
|
│ uuid │ 7f42c4dd-9626-4d89-9f2b-c7bc6da7d533 │
|
||||||
|
├─────────────────┼──────────────────────────────────────┤
|
||||||
|
│ ctime │ Wed Jan 6 09:07:51 2021 │
|
||||||
|
├─────────────────┼──────────────────────────────────────┤
|
||||||
|
│ pool │ daily │
|
||||||
|
├─────────────────┼──────────────────────────────────────┤
|
||||||
|
│ media-set-uuid │ 00000000-0000-0000-0000-000000000000 │
|
||||||
|
├─────────────────┼──────────────────────────────────────┤
|
||||||
|
│ media-set-ctime │ Wed Jan 6 09:07:51 2021 │
|
||||||
|
└─────────────────┴──────────────────────────────────────┘
|
||||||
|
|
||||||
|
.. NOTE:: The ``media-set-uuid`` using all zeros indicates an empty
|
||||||
|
tape (not used by any media set).
|
||||||
|
|
||||||
|
If you have a tape library, apply the sticky barcode label to the tape
|
||||||
|
cartridges first. Then load those empty tapes into the library. You
|
||||||
|
can then label all unlabeled tapes with a single command::
|
||||||
|
|
||||||
|
# proxmox-tape barcode-label [--pool <pool-name>]
|
||||||
|
|
||||||
|
|
||||||
|
Run Tape Backups
|
||||||
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To manually run a backup job use::
|
||||||
|
|
||||||
|
# proxmox-tape backup <store> <pool> [OPTIONS]
|
||||||
|
|
||||||
|
The following options are available:
|
||||||
|
|
||||||
|
--eject-media Eject media upon job completion.
|
||||||
|
|
||||||
|
It is normally good practice to eject the tape after use. This unmounts the
|
||||||
|
tape from the drive and prevents the tape from getting dirty with dust.
|
||||||
|
|
||||||
|
--export-media-set Export media set upon job completion.
|
||||||
|
|
||||||
|
After a successful backup job, this moves all tapes from the used
|
||||||
|
media set into import-export slots. The operator can then pick up
|
||||||
|
those tapes and move them to a media vault.
|
||||||
|
|
||||||
|
|
||||||
|
Restore from Tape
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Restore is done at media-set granularity, so you first need to find
|
||||||
|
out which media set contains the data you want to restore. This
|
||||||
|
information is stored in the media catalog. If you do not have media
|
||||||
|
catalogs, you need to restore them first. Please note that you need
|
||||||
|
the catalog to find your data, but restoring a complete media-set does
|
||||||
|
not need media catalogs.
|
||||||
|
|
||||||
|
The following command shows the media content (from catalog)::
|
||||||
|
|
||||||
|
# proxmox-tape media content
|
||||||
|
┌────────────┬──────┬──────────────────────────┬────────┬────────────────────────────────┬──────────────────────────────────────┐
|
||||||
|
│ label-text │ pool │ media-set-name │ seq-nr │ snapshot │ media-set-uuid │
|
||||||
|
╞════════════╪══════╪══════════════════════════╪════════╪════════════════════════════════╪══════════════════════════════════════╡
|
||||||
|
│ TEST01L8 │ p2 │ Wed Jan 13 13:55:55 2021 │ 0 │ vm/201/2021-01-11T10:43:48Z │ 9da37a55-aac7-4deb-91c6-482b3b675f30 │
|
||||||
|
├────────────┼──────┼──────────────────────────┼────────┼────────────────────────────────┼──────────────────────────────────────┤
|
||||||
|
│ ... │ ... │ ... │ ... │ ... │ ... │
|
||||||
|
└────────────┴──────┴──────────────────────────┴────────┴────────────────────────────────┴──────────────────────────────────────┘
|
||||||
|
|
||||||
|
|
||||||
|
A restore job reads the data from the media set and moves it back to
|
||||||
|
data disk (datastore)::
|
||||||
|
|
||||||
|
// proxmox-tape restore <media-set-uuid> <datastore>
|
||||||
|
|
||||||
|
# proxmox-tape restore 9da37a55-aac7-4deb-91c6-482b3b675f30 mystore
|
||||||
|
|
||||||
|
|
||||||
|
Update Inventory
|
||||||
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
|
||||||
|
Restore Catalog
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
|
||||||
|
Encryption Key Management
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Creating a new encryption key::
|
||||||
|
|
||||||
|
# proxmox-tape key create --hint "tape pw 2020"
|
||||||
|
Tape Encryption Key Password: **********
|
||||||
|
Verify Password: **********
|
||||||
|
"14:f8:79:b9:f5:13:e5:dc:bf:b6:f9:88:48:51:81:dc:79:bf:a0:22:68:47:d1:73:35:2d:b6:20:e1:7f:f5:0f"
|
||||||
|
|
||||||
|
List existing encryption keys::
|
||||||
|
|
||||||
|
# proxmox-tape key list
|
||||||
|
┌───────────────────────────────────────────────────┬───────────────┐
|
||||||
|
│ fingerprint │ hint │
|
||||||
|
╞═══════════════════════════════════════════════════╪═══════════════╡
|
||||||
|
│ 14:f8:79:b9:f5:13:e5:dc: ... :b6:20:e1:7f:f5:0f │ tape pw 2020 │
|
||||||
|
└───────────────────────────────────────────────────┴───────────────┘
|
||||||
|
|
||||||
|
To show encryption key details::
|
||||||
|
|
||||||
|
# proxmox-tape key show 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f
|
||||||
|
┌─────────────┬───────────────────────────────────────────────┐
|
||||||
|
│ Name │ Value │
|
||||||
|
╞═════════════╪═══════════════════════════════════════════════╡
|
||||||
|
│ kdf │ scrypt │
|
||||||
|
├─────────────┼───────────────────────────────────────────────┤
|
||||||
|
│ created │ Sat Jan 23 14:47:21 2021 │
|
||||||
|
├─────────────┼───────────────────────────────────────────────┤
|
||||||
|
│ modified │ Sat Jan 23 14:47:21 2021 │
|
||||||
|
├─────────────┼───────────────────────────────────────────────┤
|
||||||
|
│ fingerprint │ 14:f8:79:b9:f5:13:e5:dc:...:b6:20:e1:7f:f5:0f │
|
||||||
|
├─────────────┼───────────────────────────────────────────────┤
|
||||||
|
│ hint │ tape pw 2020 │
|
||||||
|
└─────────────┴───────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
The ``paperkey`` subcommand can be used to create a QR encoded
|
||||||
|
version of a tape encryption key. The following command sends the output of the
|
||||||
|
``paperkey`` command to a text file, for easy printing::
|
||||||
|
|
||||||
|
proxmox-tape key paperkey <fingerprint> --output-format text > qrkey.txt
|
||||||
|
|
||||||
|
|
||||||
|
.. _restore_encryption_key:
|
||||||
|
|
||||||
|
Restoring Encryption Keys
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
You can restore the encryption key from the tape, using the password
|
||||||
|
used to generate the key. First, load the tape you want to restore
|
||||||
|
into the drive. Then run::
|
||||||
|
|
||||||
|
# proxmox-tape key restore
|
||||||
|
Tape Encryption Key Password: ***********
|
||||||
|
|
||||||
|
If the password is correct, the key will get imported to the
|
||||||
|
database. Further restore jobs automatically use any available key.
|
||||||
|
|
||||||
|
|
||||||
|
Tape Cleaning
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
LTO tape drives require regular cleaning. This is done by loading a
|
||||||
|
cleaning cartridge into the drive, which is a manual task for
|
||||||
|
standalone drives.
|
||||||
|
|
||||||
|
For tape libraries, cleaning cartridges are identified using special
|
||||||
|
labels starting with letters "CLN". For example, our tape library has a
|
||||||
|
cleaning cartridge inside slot 3::
|
||||||
|
|
||||||
|
# proxmox-tape changer status sl3
|
||||||
|
┌───────────────┬──────────┬────────────┬─────────────┐
|
||||||
|
│ entry-kind │ entry-id │ changer-id │ loaded-slot │
|
||||||
|
╞═══════════════╪══════════╪════════════╪═════════════╡
|
||||||
|
│ drive │ 0 │ vtape1 │ 1 │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 1 │ │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 2 │ vtape2 │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ slot │ 3 │ CLN001CU │ │
|
||||||
|
├───────────────┼──────────┼────────────┼─────────────┤
|
||||||
|
│ ... │ ... │ │ │
|
||||||
|
└───────────────┴──────────┴────────────┴─────────────┘
|
||||||
|
|
||||||
|
To initiate a cleaning operation simply run::
|
||||||
|
|
||||||
|
# proxmox-tape clean
|
||||||
|
|
||||||
|
This command does the following:
|
||||||
|
|
||||||
|
- find the cleaning tape (in slot 3)
|
||||||
|
|
||||||
|
- unload the current media from the drive (back to slot1)
|
||||||
|
|
||||||
|
- load the cleaning tape into the drive
|
||||||
|
|
||||||
|
- run drive cleaning operation
|
||||||
|
|
||||||
|
- unload the cleaning tape (to slot 3)
|
@ -41,11 +41,12 @@ users:
|
|||||||
:alt: Add a new user
|
:alt: Add a new user
|
||||||
|
|
||||||
The superuser has full administration rights on everything, so you
|
The superuser has full administration rights on everything, so you
|
||||||
normally want to add other users with less privileges. You can create a new
|
normally want to add other users with less privileges. You can add a new
|
||||||
user with the ``user create`` subcommand or through the web interface, under
|
user with the ``user create`` subcommand or through the web
|
||||||
**Configuration -> User Management**. The ``create`` subcommand lets you specify
|
interface, under the **User Management** tab of **Configuration -> Access
|
||||||
many options like ``--email`` or ``--password``. You can update or change any
|
Control**. The ``create`` subcommand lets you specify many options like
|
||||||
user properties using the ``update`` subcommand later (**Edit** in the GUI):
|
``--email`` or ``--password``. You can update or change any user properties
|
||||||
|
using the ``update`` subcommand later (**Edit** in the GUI):
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -70,7 +71,7 @@ The resulting user list looks like this:
|
|||||||
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
│ root@pam │ 1 │ │ │ │ │ Superuser │
|
||||||
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
└──────────┴────────┴────────┴───────────┴──────────┴──────────────────┴──────────────────┘
|
||||||
|
|
||||||
Newly created users do not have any permissions. Please read the next
|
Newly created users do not have any permissions. Please read the Access Control
|
||||||
section to learn how to set access permissions.
|
section to learn how to set access permissions.
|
||||||
|
|
||||||
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
If you want to disable a user account, you can do that by setting ``--enable`` to ``0``
|
||||||
@ -85,15 +86,77 @@ Or completely remove the user with:
|
|||||||
|
|
||||||
# proxmox-backup-manager user remove john@pbs
|
# proxmox-backup-manager user remove john@pbs
|
||||||
|
|
||||||
|
.. _user_tokens:
|
||||||
|
|
||||||
|
API Tokens
|
||||||
|
----------
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-apitoken-overview.png
|
||||||
|
:align: right
|
||||||
|
:alt: API Token Overview
|
||||||
|
|
||||||
|
Any authenticated user can generate API tokens which can in turn be used to
|
||||||
|
configure various clients, instead of directly providing the username and
|
||||||
|
password.
|
||||||
|
|
||||||
|
API tokens serve two purposes:
|
||||||
|
|
||||||
|
#. Easy revocation in case a client gets compromised
|
||||||
|
#. Limit permissions for each client/token within the user's permissions
|
||||||
|
|
||||||
|
An API token consists of two parts: an identifier consisting of the user name,
|
||||||
|
the realm and a tokenname (``user@realm!tokenname``), and a secret value. Both
|
||||||
|
need to be provided to the client in place of the user ID (``user@realm``) and
|
||||||
|
the user password, respectively.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-apitoken-secret-value.png
|
||||||
|
:align: right
|
||||||
|
:alt: API secret value
|
||||||
|
|
||||||
|
The API token is passed from the client to the server by setting the
|
||||||
|
``Authorization`` HTTP header with method ``PBSAPIToken`` to the value
|
||||||
|
``TOKENID:TOKENSECRET``.
|
||||||
|
|
||||||
|
Generating new tokens can be done using ``proxmox-backup-manager`` or the GUI:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager user generate-token john@pbs client1
|
||||||
|
Result: {
|
||||||
|
"tokenid": "john@pbs!client1",
|
||||||
|
"value": "d63e505a-e3ec-449a-9bc7-1da610d4ccde"
|
||||||
|
}
|
||||||
|
|
||||||
|
.. note:: The displayed secret value needs to be saved, since it cannot be
|
||||||
|
displayed again after generating the API token.
|
||||||
|
|
||||||
|
The ``user list-tokens`` sub-command can be used to display tokens and their
|
||||||
|
metadata:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager user list-tokens john@pbs
|
||||||
|
┌──────────────────┬────────┬────────┬─────────┐
|
||||||
|
│ tokenid │ enable │ expire │ comment │
|
||||||
|
╞══════════════════╪════════╪════════╪═════════╡
|
||||||
|
│ john@pbs!client1 │ 1 │ │ │
|
||||||
|
└──────────────────┴────────┴────────┴─────────┘
|
||||||
|
|
||||||
|
Similarly, the ``user delete-token`` subcommand can be used to delete a token
|
||||||
|
again.
|
||||||
|
|
||||||
|
Newly generated API tokens don't have any permissions. Please read the next
|
||||||
|
section to learn how to set access permissions.
|
||||||
|
|
||||||
|
|
||||||
.. _user_acl:
|
.. _user_acl:
|
||||||
|
|
||||||
Access Control
|
Access Control
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
By default new users do not have any permission. Instead you need to
|
By default new users and API tokens do not have any permission. Instead you
|
||||||
specify what is allowed and what is not. You can do this by assigning
|
need to specify what is allowed and what is not. You can do this by assigning
|
||||||
roles to users on specific objects like datastores or remotes. The
|
roles to users/tokens on specific objects like datastores or remotes. The
|
||||||
following roles exist:
|
following roles exist:
|
||||||
|
|
||||||
**NoAccess**
|
**NoAccess**
|
||||||
@ -130,7 +193,7 @@ following roles exist:
|
|||||||
**RemoteSyncOperator**
|
**RemoteSyncOperator**
|
||||||
Is allowed to read data from a remote.
|
Is allowed to read data from a remote.
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-permissions-add.png
|
.. image:: images/screenshots/pbs-gui-user-management-add-user.png
|
||||||
:align: right
|
:align: right
|
||||||
:alt: Add permissions for user
|
:alt: Add permissions for user
|
||||||
|
|
||||||
@ -148,31 +211,32 @@ The data represented in each field is as follows:
|
|||||||
#. The object on which the permission is set. This can be a specific object
|
#. The object on which the permission is set. This can be a specific object
|
||||||
(single datastore, remote, etc.) or a top level object, which with
|
(single datastore, remote, etc.) or a top level object, which with
|
||||||
propagation enabled, represents all children of the object also.
|
propagation enabled, represents all children of the object also.
|
||||||
#. The user for which the permission is set
|
#. The user(s)/token(s) for which the permission is set
|
||||||
#. The role being set
|
#. The role being set
|
||||||
|
|
||||||
You can manage datastore permissions from **Configuration -> Permissions** in the
|
You can manage permissions via **Configuration -> Access Control ->
|
||||||
web interface. Likewise, you can use the ``acl`` subcommand to manage and
|
Permissions** in the web interface. Likewise, you can use the ``acl``
|
||||||
monitor user permissions from the command line. For example, the command below
|
subcommand to manage and monitor user permissions from the command line. For
|
||||||
will add the user ``john@pbs`` as a **DatastoreAdmin** for the datastore
|
example, the command below will add the user ``john@pbs`` as a
|
||||||
``store1``, located at ``/backup/disk1/store1``:
|
**DatastoreAdmin** for the datastore ``store1``, located at
|
||||||
|
``/backup/disk1/store1``:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --userid john@pbs
|
# proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --auth-id john@pbs
|
||||||
|
|
||||||
You can monitor the roles of each user using the following command:
|
You can list the ACLs of each user/token using the following command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-manager acl list
|
# proxmox-backup-manager acl list
|
||||||
┌──────────┬──────────────────┬───────────┬────────────────┐
|
┌──────────┬───────────────────┬───────────┬────────────────┐
|
||||||
│ ugid │ path │ propagate │ roleid │
|
│ ugid │ path │ propagate │ roleid │
|
||||||
╞══════════╪══════════════════╪═══════════╪════════════════╡
|
╞══════════╪═══════════════════╪═══════════╪════════════════╡
|
||||||
│ john@pbs │ /datastore/disk1 │ 1 │ DatastoreAdmin │
|
│ john@pbs │ /datastore/store1 │ 1 │ DatastoreAdmin │
|
||||||
└──────────┴──────────────────┴───────────┴────────────────┘
|
└──────────┴───────────────────┴───────────┴────────────────┘
|
||||||
|
|
||||||
A single user can be assigned multiple permission sets for different datastores.
|
A single user/token can be assigned multiple permission sets for different datastores.
|
||||||
|
|
||||||
.. Note::
|
.. Note::
|
||||||
Naming convention is important here. For datastores on the host,
|
Naming convention is important here. For datastores on the host,
|
||||||
@ -183,4 +247,141 @@ A single user can be assigned multiple permission sets for different datastores.
|
|||||||
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
remote (see `Remote` below) and ``{storename}`` is the name of the datastore on
|
||||||
the remote.
|
the remote.
|
||||||
|
|
||||||
|
API Token permissions
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
API token permissions are calculated based on ACLs containing their ID
|
||||||
|
independent of those of their corresponding user. The resulting permission set
|
||||||
|
on a given path is then intersected with that of the corresponding user.
|
||||||
|
|
||||||
|
In practice this means:
|
||||||
|
|
||||||
|
#. API tokens require their own ACL entries
|
||||||
|
#. API tokens can never do more than their corresponding user
|
||||||
|
|
||||||
|
Effective permissions
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To calculate and display the effective permission set of a user or API token
|
||||||
|
you can use the ``proxmox-backup-manager user permission`` command:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager user permissions john@pbs --path /datastore/store1
|
||||||
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
|
Path: /datastore/store1
|
||||||
|
- Datastore.Audit (*)
|
||||||
|
- Datastore.Backup (*)
|
||||||
|
- Datastore.Modify (*)
|
||||||
|
- Datastore.Prune (*)
|
||||||
|
- Datastore.Read (*)
|
||||||
|
- Datastore.Verify (*)
|
||||||
|
|
||||||
|
# proxmox-backup-manager acl update /datastore/store1 DatastoreBackup --auth-id 'john@pbs!client1'
|
||||||
|
# proxmox-backup-manager user permissions 'john@pbs!client1' --path /datastore/store1
|
||||||
|
Privileges with (*) have the propagate flag set
|
||||||
|
|
||||||
|
Path: /datastore/store1
|
||||||
|
- Datastore.Backup (*)
|
||||||
|
|
||||||
|
.. _user_tfa:
|
||||||
|
Two-factor authentication
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
Introduction
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Simple authentication requires only one secret piece of evidence (one factor) that
|
||||||
|
a user can successfully claim an identity (authenticate), for example, that you
|
||||||
|
are allowed to login as `root@pam` on a specific Proxmox Backup Server.
|
||||||
|
If the password gets stolen, or leaked in another way, anybody can use it to
|
||||||
|
login - even if they should not be allowed to do so.
|
||||||
|
|
||||||
|
With Two-factor authentication (TFA) a user is asked for an additional factor,
|
||||||
|
to prove their authenticity. The extra factor is different from a password
|
||||||
|
(something only the user knows), it is something only the user has, for example
|
||||||
|
a piece of hardware (security key) or an secret saved on the users smartphone.
|
||||||
|
|
||||||
|
This means that a remote user can never get hold of such a physical object. So,
|
||||||
|
even if that user knows your password, they cannot successfully
|
||||||
|
authenticate as you, as your second factor is missing.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tfa-login.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a new user
|
||||||
|
|
||||||
|
Available Second Factors
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
You can set up more than one second factor, to avoid a situation where losing your smartphone
|
||||||
|
or security key permanently locks you out of your account.
|
||||||
|
|
||||||
|
There are three different two-factor authentication methods supported:
|
||||||
|
|
||||||
|
* TOTP (`Time-based One-Time Password <https://en.wikipedia.org/wiki/Time-based_One-Time_Password>`_).
|
||||||
|
A short code derived from a shared secret and the current time, it switches
|
||||||
|
every 30 seconds.
|
||||||
|
|
||||||
|
* WebAuthn (`Web Authentication <https://en.wikipedia.org/wiki/WebAuthn>`_).
|
||||||
|
A general standard for authentication. It is implemented by various security
|
||||||
|
devices like hardware keys or trusted platform modules (TPM) from a computer
|
||||||
|
or smart phone.
|
||||||
|
|
||||||
|
* Single use Recovery Keys. A list of keys which should either be printed out
|
||||||
|
and locked in a secure vault or saved digitally in an electronic vault.
|
||||||
|
Each key can be used only once, they are perfect for ensuring you are not
|
||||||
|
locked out even if all of your other second factors are lost or corrupt.
|
||||||
|
|
||||||
|
|
||||||
|
Setup
|
||||||
|
~~~~~
|
||||||
|
|
||||||
|
.. _user_tfa_setup_totp:
|
||||||
|
TOTP
|
||||||
|
^^^^
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tfa-add-totp.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a new user
|
||||||
|
|
||||||
|
There is no server setup required; simply install a TOTP app on your
|
||||||
|
smartphone (for example, `FreeOTP <https://freeotp.github.io/>`_) and use the
|
||||||
|
Proxmox Backup Server web-interface to add a TOTP factor.
|
||||||
|
|
||||||
|
.. _user_tfa_setup_webauthn:
|
||||||
|
WebAuthn
|
||||||
|
^^^^^^^^
|
||||||
|
|
||||||
|
For WebAuthn to work you need to have two things:
|
||||||
|
|
||||||
|
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||||
|
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
|
||||||
|
|
||||||
|
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
||||||
|
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
||||||
|
|
||||||
|
Once you have fulfilled both of these requirements, you can add a WebAuthn
|
||||||
|
configuration in the *Access Control* panel.
|
||||||
|
|
||||||
|
.. _user_tfa_setup_recovery_keys:
|
||||||
|
Recovery Keys
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tfa-add-recovery-keys.png
|
||||||
|
:align: right
|
||||||
|
:alt: Add a new user
|
||||||
|
|
||||||
|
Recovery key codes do not need any preparation, you can simply create a set of
|
||||||
|
recovery keys in the *Access Control* panel.
|
||||||
|
|
||||||
|
.. note:: There can only be one set of single-use recovery keys per user at any
|
||||||
|
time.
|
||||||
|
|
||||||
|
TFA and Automated Access
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Two-factor authentication is only implemented for the web-interface; you should
|
||||||
|
use :ref:`API Tokens <user_tokens>` for all other use cases, especially
|
||||||
|
non-interactive ones (for example, adding a Proxmox Backup server to Proxmox VE
|
||||||
|
as a storage).
|
||||||
|
@ -1,13 +1,15 @@
|
|||||||
include ../defines.mk
|
include ../defines.mk
|
||||||
|
|
||||||
UNITS :=
|
UNITS := \
|
||||||
|
proxmox-backup-daily-update.timer \
|
||||||
|
|
||||||
DYNAMIC_UNITS := \
|
DYNAMIC_UNITS := \
|
||||||
proxmox-backup-banner.service \
|
proxmox-backup-banner.service \
|
||||||
|
proxmox-backup-daily-update.service \
|
||||||
proxmox-backup.service \
|
proxmox-backup.service \
|
||||||
proxmox-backup-proxy.service
|
proxmox-backup-proxy.service
|
||||||
|
|
||||||
all: $(UNITS) $(DYNAMIC_UNITS) pbstest-beta.list
|
all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f $(DYNAMIC_UNITS)
|
rm -f $(DYNAMIC_UNITS)
|
||||||
|
1
etc/pbs-enterprise.list
Normal file
@ -0,0 +1 @@
|
|||||||
|
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
@ -1 +0,0 @@
|
|||||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
|
8
etc/proxmox-backup-daily-update.service.in
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Daily Proxmox Backup Server update and maintenance activities
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-daily-update
|
10
etc/proxmox-backup-daily-update.timer
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Daily Proxmox Backup Server update and maintenance activities
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
OnCalendar=*-*-* 1:00
|
||||||
|
RandomizedDelaySec=5h
|
||||||
|
Persistent=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
@ -9,6 +9,7 @@ After=proxmox-backup.service
|
|||||||
Type=notify
|
Type=notify
|
||||||
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-proxy
|
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-proxy
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
|
PIDFile=/run/proxmox-backup/proxy.pid
|
||||||
Restart=on-failure
|
Restart=on-failure
|
||||||
User=%PROXY_USER%
|
User=%PROXY_USER%
|
||||||
Group=%PROXY_USER%
|
Group=%PROXY_USER%
|
||||||
|
@ -7,6 +7,7 @@ After=network.target
|
|||||||
Type=notify
|
Type=notify
|
||||||
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-api
|
ExecStart=%LIBEXECDIR%/proxmox-backup/proxmox-backup-api
|
||||||
ExecReload=/bin/kill -HUP $MAINPID
|
ExecReload=/bin/kill -HUP $MAINPID
|
||||||
|
PIDFile=/run/proxmox-backup/api.pid
|
||||||
Restart=on-failure
|
Restart=on-failure
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
|
@ -2,7 +2,7 @@ use std::io::Write;
|
|||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Userid;
|
use proxmox_backup::api2::types::Authid;
|
||||||
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
||||||
|
|
||||||
pub struct DummyWriter {
|
pub struct DummyWriter {
|
||||||
@ -26,13 +26,13 @@ async fn run() -> Result<(), Error> {
|
|||||||
|
|
||||||
let host = "localhost";
|
let host = "localhost";
|
||||||
|
|
||||||
let username = Userid::root_userid();
|
let auth_id = Authid::root_auth_id();
|
||||||
|
|
||||||
let options = HttpClientOptions::new()
|
let options = HttpClientOptions::default()
|
||||||
.interactive(true)
|
.interactive(true)
|
||||||
.ticket_cache(true);
|
.ticket_cache(true);
|
||||||
|
|
||||||
let client = HttpClient::new(host, 8007, username, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
let backup_time = proxmox::tools::time::parse_rfc3339("2019-06-28T10:49:48Z")?;
|
||||||
|
|
||||||
|
@ -2,7 +2,7 @@ use std::future::Future;
|
|||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
use futures::future::TryFutureExt;
|
use futures::future::TryFutureExt;
|
||||||
use futures::stream::Stream;
|
use futures::stream::Stream;
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
@ -38,11 +38,11 @@ impl Future for Process {
|
|||||||
this.body.flow_control().release_capacity(chunk.len())?;
|
this.body.flow_control().release_capacity(chunk.len())?;
|
||||||
this.bytes += chunk.len();
|
this.bytes += chunk.len();
|
||||||
// println!("GOT FRAME {}", chunk.len());
|
// println!("GOT FRAME {}", chunk.len());
|
||||||
},
|
}
|
||||||
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
||||||
None => {
|
None => {
|
||||||
this.trailers = true;
|
this.trailers = true;
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -52,7 +52,6 @@ impl Future for Process {
|
|||||||
fn send_request(
|
fn send_request(
|
||||||
mut client: h2::client::SendRequest<bytes::Bytes>,
|
mut client: h2::client::SendRequest<bytes::Bytes>,
|
||||||
) -> impl Future<Output = Result<usize, Error>> {
|
) -> impl Future<Output = Result<usize, Error>> {
|
||||||
|
|
||||||
println!("sending request");
|
println!("sending request");
|
||||||
|
|
||||||
let request = http::Request::builder()
|
let request = http::Request::builder()
|
||||||
@ -62,10 +61,10 @@ fn send_request(
|
|||||||
|
|
||||||
let (response, _stream) = client.send_request(request, true).unwrap();
|
let (response, _stream) = client.send_request(request, true).unwrap();
|
||||||
|
|
||||||
response
|
response.map_err(Error::from).and_then(|response| Process {
|
||||||
.map_err(Error::from)
|
body: response.into_body(),
|
||||||
.and_then(|response| {
|
trailers: false,
|
||||||
Process { body: response.into_body(), trailers: false, bytes: 0 }
|
bytes: 0,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,16 +73,15 @@ fn main() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
|
||||||
let start = std::time::SystemTime::now();
|
let start = std::time::SystemTime::now();
|
||||||
|
|
||||||
let conn = TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008)))
|
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||||
.await?;
|
conn.set_nodelay(true).unwrap();
|
||||||
|
|
||||||
let (client, h2) = h2::client::Builder::new()
|
let (client, h2) = h2::client::Builder::new()
|
||||||
.initial_connection_window_size(1024*1024*1024)
|
.initial_connection_window_size(1024 * 1024 * 1024)
|
||||||
.initial_window_size(1024*1024*1024)
|
.initial_window_size(1024 * 1024 * 1024)
|
||||||
.max_frame_size(4*1024*1024)
|
.max_frame_size(4 * 1024 * 1024)
|
||||||
.handshake(conn)
|
.handshake(conn)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
@ -99,10 +97,13 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
let elapsed = (elapsed.as_secs() as f64) +
|
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||||
(elapsed.subsec_millis() as f64)/1000.0;
|
|
||||||
|
|
||||||
println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
|
println!(
|
||||||
|
"Downloaded {} bytes, {} MB/s",
|
||||||
|
bytes,
|
||||||
|
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
|
||||||
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ use std::task::{Context, Poll};
|
|||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
use futures::future::TryFutureExt;
|
use futures::future::TryFutureExt;
|
||||||
use futures::stream::Stream;
|
use futures::stream::Stream;
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
|
||||||
// Simple H2 client to test H2 download speed using h2s-server.rs
|
// Simple H2 client to test H2 download speed using h2s-server.rs
|
||||||
|
|
||||||
@ -37,11 +38,11 @@ impl Future for Process {
|
|||||||
this.body.flow_control().release_capacity(chunk.len())?;
|
this.body.flow_control().release_capacity(chunk.len())?;
|
||||||
this.bytes += chunk.len();
|
this.bytes += chunk.len();
|
||||||
// println!("GOT FRAME {}", chunk.len());
|
// println!("GOT FRAME {}", chunk.len());
|
||||||
},
|
}
|
||||||
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
||||||
None => {
|
None => {
|
||||||
this.trailers = true;
|
this.trailers = true;
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -60,10 +61,10 @@ fn send_request(
|
|||||||
|
|
||||||
let (response, _stream) = client.send_request(request, true).unwrap();
|
let (response, _stream) = client.send_request(request, true).unwrap();
|
||||||
|
|
||||||
response
|
response.map_err(Error::from).and_then(|response| Process {
|
||||||
.map_err(Error::from)
|
body: response.into_body(),
|
||||||
.and_then(|response| {
|
trailers: false,
|
||||||
Process { body: response.into_body(), trailers: false, bytes: 0 }
|
bytes: 0,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,57 +75,51 @@ fn main() -> Result<(), Error> {
|
|||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
let start = std::time::SystemTime::now();
|
let start = std::time::SystemTime::now();
|
||||||
|
|
||||||
let conn =
|
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||||
tokio::net::TcpStream::connect(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
|
|
||||||
|
|
||||||
conn.set_nodelay(true).unwrap();
|
conn.set_nodelay(true).unwrap();
|
||||||
conn.set_recv_buffer_size(1024*1024).unwrap();
|
|
||||||
|
|
||||||
use openssl::ssl::{SslConnector, SslMethod};
|
use openssl::ssl::{SslConnector, SslMethod};
|
||||||
|
|
||||||
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
|
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
|
||||||
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
|
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
|
||||||
let conn =
|
let ssl = ssl_connector_builder
|
||||||
tokio_openssl::connect(
|
.build()
|
||||||
ssl_connector_builder.build().configure()?,
|
.configure()?
|
||||||
"localhost",
|
.into_ssl("localhost")?;
|
||||||
conn,
|
|
||||||
)
|
let conn = tokio_openssl::SslStream::new(ssl, conn)?;
|
||||||
|
let mut conn = Box::pin(conn);
|
||||||
|
conn.as_mut()
|
||||||
|
.connect()
|
||||||
.await
|
.await
|
||||||
.map_err(|err| format_err!("connect failed - {}", err))?;
|
.map_err(|err| format_err!("connect failed - {}", err))?;
|
||||||
|
|
||||||
let (client, h2) = h2::client::Builder::new()
|
let (client, h2) = h2::client::Builder::new()
|
||||||
.initial_connection_window_size(1024*1024*1024)
|
.initial_connection_window_size(1024 * 1024 * 1024)
|
||||||
.initial_window_size(1024*1024*1024)
|
.initial_window_size(1024 * 1024 * 1024)
|
||||||
.max_frame_size(4*1024*1024)
|
.max_frame_size(4 * 1024 * 1024)
|
||||||
.handshake(conn)
|
.handshake(conn)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
// Spawn a task to run the conn...
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
if let Err(e) = h2.await {
|
if let Err(err) = h2.await {
|
||||||
println!("GOT ERR={:?}", e);
|
println!("GOT ERR={:?}", err);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut bytes = 0;
|
let mut bytes = 0;
|
||||||
for _ in 0..100 {
|
for _ in 0..2000 {
|
||||||
match send_request(client.clone()).await {
|
bytes += send_request(client.clone()).await?;
|
||||||
Ok(b) => {
|
|
||||||
bytes += b;
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
println!("ERROR {}", e);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let elapsed = start.elapsed().unwrap();
|
let elapsed = start.elapsed().unwrap();
|
||||||
let elapsed = (elapsed.as_secs() as f64) +
|
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||||
(elapsed.subsec_millis() as f64)/1000.0;
|
|
||||||
|
|
||||||
println!("Downloaded {} bytes, {} MB/s", bytes, (bytes as f64)/(elapsed*1024.0*1024.0));
|
println!(
|
||||||
|
"Downloaded {} bytes, {} MB/s",
|
||||||
|
bytes,
|
||||||
|
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
|
||||||
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -2,14 +2,12 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
use hyper::{Request, Response, Body};
|
use hyper::{Body, Request, Response};
|
||||||
use openssl::ssl::{SslMethod, SslAcceptor, SslFiletype};
|
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
use proxmox_backup::configdir;
|
use proxmox_backup::configdir;
|
||||||
|
|
||||||
// Simple H2 server to test H2 speed with h2s-client.rs
|
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
proxmox_backup::tools::runtime::main(run())
|
||||||
}
|
}
|
||||||
@ -19,22 +17,23 @@ async fn run() -> Result<(), Error> {
|
|||||||
let cert_path = configdir!("/proxy.pem");
|
let cert_path = configdir!("/proxy.pem");
|
||||||
|
|
||||||
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||||
acceptor.set_private_key_file(key_path, SslFiletype::PEM)
|
acceptor
|
||||||
|
.set_private_key_file(key_path, SslFiletype::PEM)
|
||||||
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
|
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
|
||||||
acceptor.set_certificate_chain_file(cert_path)
|
acceptor
|
||||||
|
.set_certificate_chain_file(cert_path)
|
||||||
.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
|
.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
|
||||||
acceptor.check_private_key().unwrap();
|
acceptor.check_private_key().unwrap();
|
||||||
|
|
||||||
let acceptor = Arc::new(acceptor.build());
|
let acceptor = Arc::new(acceptor.build());
|
||||||
|
|
||||||
let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
|
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||||
|
|
||||||
println!("listening on {:?}", listener.local_addr());
|
println!("listening on {:?}", listener.local_addr());
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let (socket, _addr) = listener.accept().await?;
|
let (socket, _addr) = listener.accept().await?;
|
||||||
tokio::spawn(handle_connection(socket, Arc::clone(&acceptor))
|
tokio::spawn(handle_connection(socket, Arc::clone(&acceptor)).map(|res| {
|
||||||
.map(|res| {
|
|
||||||
if let Err(err) = res {
|
if let Err(err) = res {
|
||||||
eprintln!("Error: {}", err);
|
eprintln!("Error: {}", err);
|
||||||
}
|
}
|
||||||
@ -42,15 +41,14 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_connection(
|
async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Result<(), Error> {
|
||||||
socket: TcpStream,
|
|
||||||
acceptor: Arc<SslAcceptor>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
socket.set_nodelay(true).unwrap();
|
socket.set_nodelay(true).unwrap();
|
||||||
socket.set_send_buffer_size(1024*1024).unwrap();
|
|
||||||
socket.set_recv_buffer_size(1024*1024).unwrap();
|
|
||||||
|
|
||||||
let socket = tokio_openssl::accept(acceptor.as_ref(), socket).await?;
|
let ssl = openssl::ssl::Ssl::new(acceptor.context())?;
|
||||||
|
let stream = tokio_openssl::SslStream::new(ssl, socket)?;
|
||||||
|
let mut stream = Box::pin(stream);
|
||||||
|
|
||||||
|
stream.as_mut().accept().await?;
|
||||||
|
|
||||||
let mut http = hyper::server::conn::Http::new();
|
let mut http = hyper::server::conn::Http::new();
|
||||||
http.http2_only(true);
|
http.http2_only(true);
|
||||||
@ -61,7 +59,7 @@ async fn handle_connection(
|
|||||||
|
|
||||||
let service = hyper::service::service_fn(|_req: Request<Body>| {
|
let service = hyper::service::service_fn(|_req: Request<Body>| {
|
||||||
println!("Got request");
|
println!("Got request");
|
||||||
let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A,A...]
|
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
|
||||||
let body = Body::from(buffer);
|
let body = Body::from(buffer);
|
||||||
|
|
||||||
let response = Response::builder()
|
let response = Response::builder()
|
||||||
@ -72,7 +70,7 @@ async fn handle_connection(
|
|||||||
future::ok::<_, Error>(response)
|
future::ok::<_, Error>(response)
|
||||||
});
|
});
|
||||||
|
|
||||||
http.serve_connection(socket, service)
|
http.serve_connection(stream, service)
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
@ -1,26 +1,21 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::Error;
|
||||||
use futures::*;
|
use futures::*;
|
||||||
|
use hyper::{Body, Request, Response};
|
||||||
|
|
||||||
// Simple H2 server to test H2 speed with h2client.rs
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
use tokio::net::TcpListener;
|
|
||||||
use tokio::io::{AsyncRead, AsyncWrite};
|
|
||||||
|
|
||||||
use proxmox_backup::client::pipe_to_stream::PipeToSendStream;
|
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
proxmox_backup::tools::runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
let mut listener = TcpListener::bind(std::net::SocketAddr::from(([127,0,0,1], 8008))).await?;
|
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||||
|
|
||||||
println!("listening on {:?}", listener.local_addr());
|
println!("listening on {:?}", listener.local_addr());
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let (socket, _addr) = listener.accept().await?;
|
let (socket, _addr) = listener.accept().await?;
|
||||||
tokio::spawn(handle_connection(socket)
|
tokio::spawn(handle_connection(socket).map(|res| {
|
||||||
.map(|res| {
|
|
||||||
if let Err(err) = res {
|
if let Err(err) = res {
|
||||||
eprintln!("Error: {}", err);
|
eprintln!("Error: {}", err);
|
||||||
}
|
}
|
||||||
@ -28,24 +23,33 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_connection<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<(), Error> {
|
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
|
||||||
let mut conn = h2::server::handshake(socket).await?;
|
socket.set_nodelay(true).unwrap();
|
||||||
|
|
||||||
println!("H2 connection bound");
|
let mut http = hyper::server::conn::Http::new();
|
||||||
|
http.http2_only(true);
|
||||||
|
// increase window size: todo - find optiomal size
|
||||||
|
let max_window_size = (1 << 31) - 2;
|
||||||
|
http.http2_initial_stream_window_size(max_window_size);
|
||||||
|
http.http2_initial_connection_window_size(max_window_size);
|
||||||
|
|
||||||
while let Some((request, mut respond)) = conn.try_next().await? {
|
let service = hyper::service::service_fn(|_req: Request<Body>| {
|
||||||
println!("GOT request: {:?}", request);
|
println!("Got request");
|
||||||
|
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
|
||||||
|
let body = Body::from(buffer);
|
||||||
|
|
||||||
let response = http::Response::builder()
|
let response = Response::builder()
|
||||||
.status(http::StatusCode::OK)
|
.status(http::StatusCode::OK)
|
||||||
.body(())
|
.header(http::header::CONTENT_TYPE, "application/octet-stream")
|
||||||
|
.body(body)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
future::ok::<_, Error>(response)
|
||||||
|
});
|
||||||
|
|
||||||
let send = respond.send_response(response, false).unwrap();
|
http.serve_connection(socket, service)
|
||||||
let data = vec![65u8; 1024*1024];
|
.map_err(Error::from)
|
||||||
PipeToSendStream::new(bytes::Bytes::from(data), send).await?;
|
.await?;
|
||||||
println!("DATA SENT");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
println!("H2 connection CLOSE !");
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Userid;
|
use proxmox_backup::api2::types::Authid;
|
||||||
use proxmox_backup::client::*;
|
use proxmox_backup::client::*;
|
||||||
|
|
||||||
async fn upload_speed() -> Result<f64, Error> {
|
async fn upload_speed() -> Result<f64, Error> {
|
||||||
@ -8,13 +8,13 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
let host = "localhost";
|
let host = "localhost";
|
||||||
let datastore = "store2";
|
let datastore = "store2";
|
||||||
|
|
||||||
let username = Userid::root_userid();
|
let auth_id = Authid::root_auth_id();
|
||||||
|
|
||||||
let options = HttpClientOptions::new()
|
let options = HttpClientOptions::default()
|
||||||
.interactive(true)
|
.interactive(true)
|
||||||
.ticket_cache(true);
|
.ticket_cache(true);
|
||||||
|
|
||||||
let client = HttpClient::new(host, 8007, username, options)?;
|
let client = HttpClient::new(host, 8007, auth_id, options)?;
|
||||||
|
|
||||||
let backup_time = proxmox::tools::time::epoch_i64();
|
let backup_time = proxmox::tools::time::epoch_i64();
|
||||||
|
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
//! The Proxmox Backup Server API
|
||||||
|
|
||||||
pub mod access;
|
pub mod access;
|
||||||
pub mod admin;
|
pub mod admin;
|
||||||
pub mod backup;
|
pub mod backup;
|
||||||
@ -7,7 +9,9 @@ pub mod reader;
|
|||||||
pub mod status;
|
pub mod status;
|
||||||
pub mod types;
|
pub mod types;
|
||||||
pub mod version;
|
pub mod version;
|
||||||
|
pub mod ping;
|
||||||
pub mod pull;
|
pub mod pull;
|
||||||
|
pub mod tape;
|
||||||
mod helpers;
|
mod helpers;
|
||||||
|
|
||||||
use proxmox::api::router::SubdirMap;
|
use proxmox::api::router::SubdirMap;
|
||||||
@ -16,15 +20,17 @@ use proxmox::list_subdirs_api_method;
|
|||||||
|
|
||||||
const NODES_ROUTER: Router = Router::new().match_all("node", &node::ROUTER);
|
const NODES_ROUTER: Router = Router::new().match_all("node", &node::ROUTER);
|
||||||
|
|
||||||
pub const SUBDIRS: SubdirMap = &[
|
const SUBDIRS: SubdirMap = &[
|
||||||
("access", &access::ROUTER),
|
("access", &access::ROUTER),
|
||||||
("admin", &admin::ROUTER),
|
("admin", &admin::ROUTER),
|
||||||
("backup", &backup::ROUTER),
|
("backup", &backup::ROUTER),
|
||||||
("config", &config::ROUTER),
|
("config", &config::ROUTER),
|
||||||
("nodes", &NODES_ROUTER),
|
("nodes", &NODES_ROUTER),
|
||||||
|
("ping", &ping::ROUTER),
|
||||||
("pull", &pull::ROUTER),
|
("pull", &pull::ROUTER),
|
||||||
("reader", &reader::ROUTER),
|
("reader", &reader::ROUTER),
|
||||||
("status", &status::ROUTER),
|
("status", &status::ROUTER),
|
||||||
|
("tape", &tape::ROUTER),
|
||||||
("version", &version::ROUTER),
|
("version", &version::ROUTER),
|
||||||
];
|
];
|
||||||
|
|
||||||
|
@ -1,45 +1,69 @@
|
|||||||
|
//! Access control (Users, Permissions and Authentication)
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
|
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::collections::HashSet;
|
||||||
|
|
||||||
use proxmox::api::{api, RpcEnvironment, Permission};
|
|
||||||
use proxmox::api::router::{Router, SubdirMap};
|
use proxmox::api::router::{Router, SubdirMap};
|
||||||
use proxmox::{sortable, identity};
|
use proxmox::api::{api, Permission, RpcEnvironment};
|
||||||
use proxmox::{http_err, list_subdirs_api_method};
|
use proxmox::{http_err, list_subdirs_api_method};
|
||||||
|
use proxmox::{identity, sortable};
|
||||||
|
|
||||||
use crate::tools::ticket::{self, Empty, Ticket};
|
|
||||||
use crate::auth_helpers::*;
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
|
use crate::auth_helpers::*;
|
||||||
|
use crate::server::ticket::ApiTicket;
|
||||||
|
use crate::tools::ticket::{self, Empty, Ticket};
|
||||||
|
|
||||||
|
use crate::config::acl as acl_config;
|
||||||
|
use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
use crate::config::acl::{PRIVILEGES, PRIV_PERMISSIONS_MODIFY};
|
use crate::config::tfa::TfaChallenge;
|
||||||
|
|
||||||
pub mod user;
|
|
||||||
pub mod domain;
|
|
||||||
pub mod acl;
|
pub mod acl;
|
||||||
|
pub mod domain;
|
||||||
pub mod role;
|
pub mod role;
|
||||||
|
pub mod tfa;
|
||||||
|
pub mod user;
|
||||||
|
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
|
enum AuthResult {
|
||||||
|
/// Successful authentication which does not require a new ticket.
|
||||||
|
Success,
|
||||||
|
|
||||||
|
/// Successful authentication which requires a ticket to be created.
|
||||||
|
CreateTicket,
|
||||||
|
|
||||||
|
/// A partial ticket which requires a 2nd factor will be created.
|
||||||
|
Partial(TfaChallenge),
|
||||||
|
}
|
||||||
|
|
||||||
/// returns Ok(true) if a ticket has to be created
|
|
||||||
/// and Ok(false) if not
|
|
||||||
fn authenticate_user(
|
fn authenticate_user(
|
||||||
userid: &Userid,
|
userid: &Userid,
|
||||||
password: &str,
|
password: &str,
|
||||||
path: Option<String>,
|
path: Option<String>,
|
||||||
privs: Option<String>,
|
privs: Option<String>,
|
||||||
port: Option<u16>,
|
port: Option<u16>,
|
||||||
) -> Result<bool, Error> {
|
tfa_challenge: Option<String>,
|
||||||
|
) -> Result<AuthResult, Error> {
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
if !user_info.is_active_user(&userid) {
|
let auth_id = Authid::from(userid.clone());
|
||||||
|
if !user_info.is_active_auth_id(&auth_id) {
|
||||||
bail!("user account disabled or expired.");
|
bail!("user account disabled or expired.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Some(tfa_challenge) = tfa_challenge {
|
||||||
|
return authenticate_2nd(userid, &tfa_challenge, password);
|
||||||
|
}
|
||||||
|
|
||||||
if password.starts_with("PBS:") {
|
if password.starts_with("PBS:") {
|
||||||
if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
|
if let Ok(ticket_userid) = Ticket::<Userid>::parse(password)
|
||||||
.and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
|
.and_then(|ticket| ticket.verify(public_auth_key(), "PBS", None))
|
||||||
{
|
{
|
||||||
if *userid == ticket_userid {
|
if *userid == ticket_userid {
|
||||||
return Ok(true);
|
return Ok(AuthResult::CreateTicket);
|
||||||
}
|
}
|
||||||
bail!("ticket login failed - wrong userid");
|
bail!("ticket login failed - wrong userid");
|
||||||
}
|
}
|
||||||
@ -49,17 +73,17 @@ fn authenticate_user(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
|
let path = path.ok_or_else(|| format_err!("missing path for termproxy ticket"))?;
|
||||||
let privilege_name = privs
|
let privilege_name =
|
||||||
.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
|
privs.ok_or_else(|| format_err!("missing privilege name for termproxy ticket"))?;
|
||||||
let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
|
let port = port.ok_or_else(|| format_err!("missing port for termproxy ticket"))?;
|
||||||
|
|
||||||
if let Ok(Empty) = Ticket::parse(password)
|
if let Ok(Empty) = Ticket::parse(password).and_then(|ticket| {
|
||||||
.and_then(|ticket| ticket.verify(
|
ticket.verify(
|
||||||
public_auth_key(),
|
public_auth_key(),
|
||||||
ticket::TERM_PREFIX,
|
ticket::TERM_PREFIX,
|
||||||
Some(&ticket::term_aad(userid, &path, port)),
|
Some(&ticket::term_aad(userid, &path, port)),
|
||||||
))
|
)
|
||||||
{
|
}) {
|
||||||
for (name, privilege) in PRIVILEGES {
|
for (name, privilege) in PRIVILEGES {
|
||||||
if *name == privilege_name {
|
if *name == privilege_name {
|
||||||
let mut path_vec = Vec::new();
|
let mut path_vec = Vec::new();
|
||||||
@ -68,9 +92,8 @@ fn authenticate_user(
|
|||||||
path_vec.push(part);
|
path_vec.push(part);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
|
||||||
user_info.check_privs(userid, &path_vec, *privilege, false)?;
|
return Ok(AuthResult::Success);
|
||||||
return Ok(false);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -78,8 +101,26 @@ fn authenticate_user(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let _ = crate::auth::authenticate_user(userid, password)?;
|
let _: () = crate::auth::authenticate_user(userid, password)?;
|
||||||
Ok(true)
|
|
||||||
|
Ok(match crate::config::tfa::login_challenge(userid)? {
|
||||||
|
None => AuthResult::CreateTicket,
|
||||||
|
Some(challenge) => AuthResult::Partial(challenge),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn authenticate_2nd(
|
||||||
|
userid: &Userid,
|
||||||
|
challenge_ticket: &str,
|
||||||
|
response: &str,
|
||||||
|
) -> Result<AuthResult, Error> {
|
||||||
|
let challenge: TfaChallenge = Ticket::<ApiTicket>::parse(&challenge_ticket)?
|
||||||
|
.verify_with_time_frame(public_auth_key(), "PBS", Some(userid.as_str()), -60..600)?
|
||||||
|
.require_partial()?;
|
||||||
|
|
||||||
|
let _: () = crate::config::tfa::verify_challenge(userid, &challenge, response.parse()?)?;
|
||||||
|
|
||||||
|
Ok(AuthResult::CreateTicket)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -106,6 +147,11 @@ fn authenticate_user(
|
|||||||
description: "Port for verifying terminal tickets.",
|
description: "Port for verifying terminal tickets.",
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"tfa-challenge": {
|
||||||
|
type: String,
|
||||||
|
description: "The signed TFA challenge string the user wants to respond to.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
@ -120,7 +166,9 @@ fn authenticate_user(
|
|||||||
},
|
},
|
||||||
CSRFPreventionToken: {
|
CSRFPreventionToken: {
|
||||||
type: String,
|
type: String,
|
||||||
description: "Cross Site Request Forgery Prevention Token.",
|
description:
|
||||||
|
"Cross Site Request Forgery Prevention Token. \
|
||||||
|
For partial tickets this is the string \"invalid\".",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -132,20 +180,24 @@ fn authenticate_user(
|
|||||||
/// Create or verify authentication ticket.
|
/// Create or verify authentication ticket.
|
||||||
///
|
///
|
||||||
/// Returns: An authentication ticket with additional infos.
|
/// Returns: An authentication ticket with additional infos.
|
||||||
fn create_ticket(
|
pub fn create_ticket(
|
||||||
username: Userid,
|
username: Userid,
|
||||||
password: String,
|
password: String,
|
||||||
path: Option<String>,
|
path: Option<String>,
|
||||||
privs: Option<String>,
|
privs: Option<String>,
|
||||||
port: Option<u16>,
|
port: Option<u16>,
|
||||||
|
tfa_challenge: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
match authenticate_user(&username, &password, path, privs, port) {
|
match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
|
||||||
Ok(true) => {
|
Ok(AuthResult::Success) => Ok(json!({ "username": username })),
|
||||||
let ticket = Ticket::new("PBS", &username)?.sign(private_auth_key(), None)?;
|
Ok(AuthResult::CreateTicket) => {
|
||||||
|
let api_ticket = ApiTicket::full(username.clone());
|
||||||
|
let ticket = Ticket::new("PBS", &api_ticket)?.sign(private_auth_key(), None)?;
|
||||||
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
|
let token = assemble_csrf_prevention_token(csrf_secret(), &username);
|
||||||
|
|
||||||
log::info!("successful auth for user '{}'", username);
|
crate::server::rest::auth_logger()?
|
||||||
|
.log(format!("successful auth for user '{}'", username));
|
||||||
|
|
||||||
Ok(json!({
|
Ok(json!({
|
||||||
"username": username,
|
"username": username,
|
||||||
@ -153,18 +205,38 @@ fn create_ticket(
|
|||||||
"CSRFPreventionToken": token,
|
"CSRFPreventionToken": token,
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
Ok(false) => Ok(json!({
|
Ok(AuthResult::Partial(challenge)) => {
|
||||||
|
let api_ticket = ApiTicket::partial(challenge);
|
||||||
|
let ticket = Ticket::new("PBS", &api_ticket)?
|
||||||
|
.sign(private_auth_key(), Some(username.as_str()))?;
|
||||||
|
Ok(json!({
|
||||||
"username": username,
|
"username": username,
|
||||||
})),
|
"ticket": ticket,
|
||||||
|
"CSRFPreventionToken": "invalid",
|
||||||
|
}))
|
||||||
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
let client_ip = "unknown"; // $rpcenv->get_client_ip() || '';
|
let client_ip = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
|
||||||
log::error!("authentication failure; rhost={} user={} msg={}", client_ip, username, err.to_string());
|
Some(ip) => format!("{}", ip),
|
||||||
|
None => "unknown".into(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let msg = format!(
|
||||||
|
"authentication failure; rhost={} user={} msg={}",
|
||||||
|
client_ip,
|
||||||
|
username,
|
||||||
|
err.to_string()
|
||||||
|
);
|
||||||
|
crate::server::rest::auth_logger()?.log(&msg);
|
||||||
|
log::error!("{}", msg);
|
||||||
|
|
||||||
Err(http_err!(UNAUTHORIZED, "permission check failed."))
|
Err(http_err!(UNAUTHORIZED, "permission check failed."))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
|
protected: true,
|
||||||
input: {
|
input: {
|
||||||
properties: {
|
properties: {
|
||||||
userid: {
|
userid: {
|
||||||
@ -176,35 +248,42 @@ fn create_ticket(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
description: "Anybody is allowed to change there own password. In addition, users with 'Permissions:Modify' privilege may change any password.",
|
description: "Everybody is allowed to change their own password. In addition, users with 'Permissions:Modify' privilege may change any password on @pbs realm.",
|
||||||
permission: &Permission::Anybody,
|
permission: &Permission::Anybody,
|
||||||
},
|
},
|
||||||
|
|
||||||
)]
|
)]
|
||||||
/// Change user password
|
/// Change user password
|
||||||
///
|
///
|
||||||
/// Each user is allowed to change his own password. Superuser
|
/// Each user is allowed to change his own password. Superuser
|
||||||
/// can change all passwords.
|
/// can change all passwords.
|
||||||
fn change_password(
|
pub fn change_password(
|
||||||
userid: Userid,
|
userid: Userid,
|
||||||
password: String,
|
password: String,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
|
let current_auth: Authid = rpcenv
|
||||||
let current_user: Userid = rpcenv
|
.get_auth_id()
|
||||||
.get_user()
|
.ok_or_else(|| format_err!("no authid available"))?
|
||||||
.ok_or_else(|| format_err!("unknown user"))?
|
|
||||||
.parse()?;
|
.parse()?;
|
||||||
|
|
||||||
let mut allowed = userid == current_user;
|
if current_auth.is_token() {
|
||||||
|
bail!("API tokens cannot access this API endpoint");
|
||||||
|
}
|
||||||
|
|
||||||
if userid == "root@pam" { allowed = true; }
|
let current_user = current_auth.user();
|
||||||
|
|
||||||
|
let mut allowed = userid == *current_user;
|
||||||
|
|
||||||
if !allowed {
|
if !allowed {
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
let privs = user_info.lookup_privs(¤t_user, &[]);
|
let privs = user_info.lookup_privs(¤t_auth, &[]);
|
||||||
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
|
if user_info.is_superuser(¤t_auth) {
|
||||||
|
allowed = true;
|
||||||
}
|
}
|
||||||
|
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 && userid.realm() != "pam" {
|
||||||
|
allowed = true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
if !allowed {
|
if !allowed {
|
||||||
bail!("you are not authorized to change the password.");
|
bail!("you are not authorized to change the password.");
|
||||||
@ -216,20 +295,138 @@ fn change_password(
|
|||||||
Ok(Value::Null)
|
Ok(Value::Null)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
"auth-id": {
|
||||||
|
type: Authid,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: ACL_PATH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
description: "Requires Sys.Audit on '/access', limited to own privileges otherwise.",
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "Map of ACL path to Map of privilege to propagate bit",
|
||||||
|
type: Object,
|
||||||
|
properties: {},
|
||||||
|
additional_properties: true,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List permissions of given or currently authenticated user / API token.
|
||||||
|
///
|
||||||
|
/// Optionally limited to specific path.
|
||||||
|
pub fn list_permissions(
|
||||||
|
auth_id: Option<Authid>,
|
||||||
|
path: Option<String>,
|
||||||
|
rpcenv: &dyn RpcEnvironment,
|
||||||
|
) -> Result<HashMap<String, HashMap<String, bool>>, Error> {
|
||||||
|
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
let user_privs = user_info.lookup_privs(¤t_auth_id, &["access"]);
|
||||||
|
|
||||||
|
let auth_id = match auth_id {
|
||||||
|
Some(auth_id) if auth_id == current_auth_id => current_auth_id,
|
||||||
|
Some(auth_id) => {
|
||||||
|
if user_privs & PRIV_SYS_AUDIT != 0
|
||||||
|
|| (auth_id.is_token()
|
||||||
|
&& !current_auth_id.is_token()
|
||||||
|
&& auth_id.user() == current_auth_id.user())
|
||||||
|
{
|
||||||
|
auth_id
|
||||||
|
} else {
|
||||||
|
bail!("not allowed to list permissions of {}", auth_id);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => current_auth_id,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn populate_acl_paths(
|
||||||
|
mut paths: HashSet<String>,
|
||||||
|
node: acl_config::AclTreeNode,
|
||||||
|
path: &str,
|
||||||
|
) -> HashSet<String> {
|
||||||
|
for (sub_path, child_node) in node.children {
|
||||||
|
let sub_path = format!("{}/{}", path, &sub_path);
|
||||||
|
paths = populate_acl_paths(paths, child_node, &sub_path);
|
||||||
|
paths.insert(sub_path);
|
||||||
|
}
|
||||||
|
paths
|
||||||
|
}
|
||||||
|
|
||||||
|
let paths = match path {
|
||||||
|
Some(path) => {
|
||||||
|
let mut paths = HashSet::new();
|
||||||
|
paths.insert(path);
|
||||||
|
paths
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
let mut paths = HashSet::new();
|
||||||
|
|
||||||
|
let (acl_tree, _) = acl_config::config()?;
|
||||||
|
paths = populate_acl_paths(paths, acl_tree.root, "");
|
||||||
|
|
||||||
|
// default paths, returned even if no ACL exists
|
||||||
|
paths.insert("/".to_string());
|
||||||
|
paths.insert("/access".to_string());
|
||||||
|
paths.insert("/datastore".to_string());
|
||||||
|
paths.insert("/remote".to_string());
|
||||||
|
paths.insert("/system".to_string());
|
||||||
|
|
||||||
|
paths
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let map = paths.into_iter().fold(
|
||||||
|
HashMap::new(),
|
||||||
|
|mut map: HashMap<String, HashMap<String, bool>>, path: String| {
|
||||||
|
let split_path = acl_config::split_acl_path(path.as_str());
|
||||||
|
let (privs, propagated_privs) = user_info.lookup_privs_details(&auth_id, &split_path);
|
||||||
|
|
||||||
|
match privs {
|
||||||
|
0 => map, // Don't leak ACL paths where we don't have any privileges
|
||||||
|
_ => {
|
||||||
|
let priv_map =
|
||||||
|
PRIVILEGES
|
||||||
|
.iter()
|
||||||
|
.fold(HashMap::new(), |mut priv_map, (name, value)| {
|
||||||
|
if value & privs != 0 {
|
||||||
|
priv_map
|
||||||
|
.insert(name.to_string(), value & propagated_privs != 0);
|
||||||
|
}
|
||||||
|
priv_map
|
||||||
|
});
|
||||||
|
|
||||||
|
map.insert(path, priv_map);
|
||||||
|
map
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(map)
|
||||||
|
}
|
||||||
|
|
||||||
#[sortable]
|
#[sortable]
|
||||||
const SUBDIRS: SubdirMap = &sorted!([
|
const SUBDIRS: SubdirMap = &sorted!([
|
||||||
("acl", &acl::ROUTER),
|
("acl", &acl::ROUTER),
|
||||||
|
("password", &Router::new().put(&API_METHOD_CHANGE_PASSWORD)),
|
||||||
(
|
(
|
||||||
"password", &Router::new()
|
"permissions",
|
||||||
.put(&API_METHOD_CHANGE_PASSWORD)
|
&Router::new().get(&API_METHOD_LIST_PERMISSIONS)
|
||||||
),
|
|
||||||
(
|
|
||||||
"ticket", &Router::new()
|
|
||||||
.post(&API_METHOD_CREATE_TICKET)
|
|
||||||
),
|
),
|
||||||
|
("ticket", &Router::new().post(&API_METHOD_CREATE_TICKET)),
|
||||||
("domains", &domain::ROUTER),
|
("domains", &domain::ROUTER),
|
||||||
("roles", &role::ROUTER),
|
("roles", &role::ROUTER),
|
||||||
("users", &user::ROUTER),
|
("users", &user::ROUTER),
|
||||||
|
("tfa", &tfa::ROUTER),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
|
//! Manage Access Control Lists
|
||||||
|
|
||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use ::serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||||
use proxmox::tools::fs::open_file_locked;
|
use proxmox::tools::fs::open_file_locked;
|
||||||
@ -7,44 +8,30 @@ use proxmox::tools::fs::open_file_locked;
|
|||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::config::acl;
|
use crate::config::acl;
|
||||||
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
|
use crate::config::acl::{Role, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
|
||||||
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
propagate: {
|
|
||||||
schema: ACL_PROPAGATE_SCHEMA,
|
|
||||||
},
|
|
||||||
path: {
|
|
||||||
schema: ACL_PATH_SCHEMA,
|
|
||||||
},
|
|
||||||
ugid_type: {
|
|
||||||
schema: ACL_UGID_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
ugid: {
|
|
||||||
type: String,
|
|
||||||
description: "User or Group ID.",
|
|
||||||
},
|
|
||||||
roleid: {
|
|
||||||
type: Role,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
/// ACL list entry.
|
|
||||||
pub struct AclListItem {
|
|
||||||
path: String,
|
|
||||||
ugid: String,
|
|
||||||
ugid_type: String,
|
|
||||||
propagate: bool,
|
|
||||||
roleid: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn extract_acl_node_data(
|
fn extract_acl_node_data(
|
||||||
node: &acl::AclTreeNode,
|
node: &acl::AclTreeNode,
|
||||||
path: &str,
|
path: &str,
|
||||||
list: &mut Vec<AclListItem>,
|
list: &mut Vec<AclListItem>,
|
||||||
exact: bool,
|
exact: bool,
|
||||||
|
token_user: &Option<Authid>,
|
||||||
) {
|
) {
|
||||||
|
// tokens can't have tokens, so we can early return
|
||||||
|
if let Some(token_user) = token_user {
|
||||||
|
if token_user.is_token() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for (user, roles) in &node.users {
|
for (user, roles) in &node.users {
|
||||||
|
if let Some(token_user) = token_user {
|
||||||
|
if !user.is_token()
|
||||||
|
|| user.user() != token_user.user() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for (role, propagate) in roles {
|
for (role, propagate) in roles {
|
||||||
list.push(AclListItem {
|
list.push(AclListItem {
|
||||||
path: if path.is_empty() { String::from("/") } else { path.to_string() },
|
path: if path.is_empty() { String::from("/") } else { path.to_string() },
|
||||||
@ -56,6 +43,10 @@ fn extract_acl_node_data(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (group, roles) in &node.groups {
|
for (group, roles) in &node.groups {
|
||||||
|
if token_user.is_some() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
for (role, propagate) in roles {
|
for (role, propagate) in roles {
|
||||||
list.push(AclListItem {
|
list.push(AclListItem {
|
||||||
path: if path.is_empty() { String::from("/") } else { path.to_string() },
|
path: if path.is_empty() { String::from("/") } else { path.to_string() },
|
||||||
@ -71,7 +62,7 @@ fn extract_acl_node_data(
|
|||||||
}
|
}
|
||||||
for (comp, child) in &node.children {
|
for (comp, child) in &node.children {
|
||||||
let new_path = format!("{}/{}", path, comp);
|
let new_path = format!("{}/{}", path, comp);
|
||||||
extract_acl_node_data(child, &new_path, list, exact);
|
extract_acl_node_data(child, &new_path, list, exact, token_user);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,7 +89,8 @@ fn extract_acl_node_data(
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Privilege(&["access", "acl"], PRIV_SYS_AUDIT, false),
|
permission: &Permission::Anybody,
|
||||||
|
description: "Returns all ACLs if user has Sys.Audit on '/access/acl', or just the ACLs containing the user's API tokens.",
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Read Access Control List (ACLs).
|
/// Read Access Control List (ACLs).
|
||||||
@ -107,18 +99,26 @@ pub fn read_acl(
|
|||||||
exact: bool,
|
exact: bool,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<AclListItem>, Error> {
|
) -> Result<Vec<AclListItem>, Error> {
|
||||||
|
let auth_id = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
//let auth_user = rpcenv.get_user().unwrap();
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "acl"]);
|
||||||
|
let auth_id_filter = if (top_level_privs & PRIV_SYS_AUDIT) == 0 {
|
||||||
|
Some(auth_id)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let (mut tree, digest) = acl::config()?;
|
let (mut tree, digest) = acl::config()?;
|
||||||
|
|
||||||
let mut list: Vec<AclListItem> = Vec::new();
|
let mut list: Vec<AclListItem> = Vec::new();
|
||||||
if let Some(path) = &path {
|
if let Some(path) = &path {
|
||||||
if let Some(node) = &tree.find_node(path) {
|
if let Some(node) = &tree.find_node(path) {
|
||||||
extract_acl_node_data(&node, path, &mut list, exact);
|
extract_acl_node_data(&node, path, &mut list, exact, &auth_id_filter);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
extract_acl_node_data(&tree.root, "", &mut list, exact);
|
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
|
||||||
}
|
}
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
@ -140,9 +140,9 @@ pub fn read_acl(
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: ACL_PROPAGATE_SCHEMA,
|
schema: ACL_PROPAGATE_SCHEMA,
|
||||||
},
|
},
|
||||||
userid: {
|
"auth-id": {
|
||||||
optional: true,
|
optional: true,
|
||||||
type: Userid,
|
type: Authid,
|
||||||
},
|
},
|
||||||
group: {
|
group: {
|
||||||
optional: true,
|
optional: true,
|
||||||
@ -160,20 +160,45 @@ pub fn read_acl(
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Privilege(&["access", "acl"], PRIV_PERMISSIONS_MODIFY, false),
|
permission: &Permission::Anybody,
|
||||||
|
description: "Requires Permissions.Modify on '/access/acl', limited to updating ACLs of the user's API tokens otherwise."
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Update Access Control List (ACLs).
|
/// Update Access Control List (ACLs).
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn update_acl(
|
pub fn update_acl(
|
||||||
path: String,
|
path: String,
|
||||||
role: String,
|
role: String,
|
||||||
propagate: Option<bool>,
|
propagate: Option<bool>,
|
||||||
userid: Option<Userid>,
|
auth_id: Option<Authid>,
|
||||||
group: Option<String>,
|
group: Option<String>,
|
||||||
delete: Option<bool>,
|
delete: Option<bool>,
|
||||||
digest: Option<String>,
|
digest: Option<String>,
|
||||||
_rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let top_level_privs = user_info.lookup_privs(¤t_auth_id, &["access", "acl"]);
|
||||||
|
if top_level_privs & PRIV_PERMISSIONS_MODIFY == 0 {
|
||||||
|
if group.is_some() {
|
||||||
|
bail!("Unprivileged users are not allowed to create group ACL item.");
|
||||||
|
}
|
||||||
|
|
||||||
|
match &auth_id {
|
||||||
|
Some(auth_id) => {
|
||||||
|
if current_auth_id.is_token() {
|
||||||
|
bail!("Unprivileged API tokens can't set ACL items.");
|
||||||
|
} else if !auth_id.is_token() {
|
||||||
|
bail!("Unprivileged users can only set ACL items for API tokens.");
|
||||||
|
} else if auth_id.user() != current_auth_id.user() {
|
||||||
|
bail!("Unprivileged users can only set ACL items for their own API tokens.");
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(acl::ACL_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
@ -190,11 +215,12 @@ pub fn update_acl(
|
|||||||
|
|
||||||
if let Some(ref _group) = group {
|
if let Some(ref _group) = group {
|
||||||
bail!("parameter 'group' - groups are currently not supported.");
|
bail!("parameter 'group' - groups are currently not supported.");
|
||||||
} else if let Some(ref userid) = userid {
|
} else if let Some(ref auth_id) = auth_id {
|
||||||
if !delete { // Note: we allow to delete non-existent users
|
if !delete { // Note: we allow to delete non-existent users
|
||||||
let user_cfg = crate::config::user::cached_config()?;
|
let user_cfg = crate::config::user::cached_config()?;
|
||||||
if user_cfg.sections.get(&userid.to_string()).is_none() {
|
if user_cfg.sections.get(&auth_id.to_string()).is_none() {
|
||||||
bail!("no such user.");
|
bail!(format!("no such {}.",
|
||||||
|
if auth_id.is_token() { "API token" } else { "user" }));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -205,11 +231,11 @@ pub fn update_acl(
|
|||||||
acl::check_acl_path(&path)?;
|
acl::check_acl_path(&path)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(userid) = userid {
|
if let Some(auth_id) = auth_id {
|
||||||
if delete {
|
if delete {
|
||||||
tree.delete_user_role(&path, &userid, &role);
|
tree.delete_user_role(&path, &auth_id, &role);
|
||||||
} else {
|
} else {
|
||||||
tree.insert_user_role(&path, &userid, &role, propagate);
|
tree.insert_user_role(&path, &auth_id, &role, propagate);
|
||||||
}
|
}
|
||||||
} else if let Some(group) = group {
|
} else if let Some(group) = group {
|
||||||
if delete {
|
if delete {
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
//! List Authentication domains/realms
|
||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
//! Manage Roles with privileges
|
||||||
|
|
||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
|
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
@ -46,7 +48,7 @@ fn list_roles() -> Result<Value, Error> {
|
|||||||
let mut priv_list = Vec::new();
|
let mut priv_list = Vec::new();
|
||||||
for (name, privilege) in PRIVILEGES.iter() {
|
for (name, privilege) in PRIVILEGES.iter() {
|
||||||
if privs & privilege > 0 {
|
if privs & privilege > 0 {
|
||||||
priv_list.push(name.clone());
|
priv_list.push(name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
|
list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
|
||||||
|
594
src/api2/access/tfa.rs
Normal file
@ -0,0 +1,594 @@
|
|||||||
|
//! Two Factor Authentication
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, Permission, Router, RpcEnvironment};
|
||||||
|
use proxmox::tools::tfa::totp::Totp;
|
||||||
|
use proxmox::{http_bail, http_err};
|
||||||
|
|
||||||
|
use crate::api2::types::{Authid, Userid, PASSWORD_SCHEMA};
|
||||||
|
use crate::config::acl::{PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
|
||||||
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
|
use crate::config::tfa::{TfaInfo, TfaUserData};
|
||||||
|
|
||||||
|
/// Perform first-factor (password) authentication only. Ignore password for the root user.
|
||||||
|
/// Otherwise check the current user's password.
|
||||||
|
///
|
||||||
|
/// This means that user admins need to type in their own password while editing a user, and
|
||||||
|
/// regular users, which can only change their own TFA settings (checked at the API level), can
|
||||||
|
/// change their own settings using their own password.
|
||||||
|
fn tfa_update_auth(
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
userid: &Userid,
|
||||||
|
password: Option<String>,
|
||||||
|
must_exist: bool,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
|
if authid.user() != Userid::root_userid() {
|
||||||
|
let password = password.ok_or_else(|| http_err!(UNAUTHORIZED, "missing password"))?;
|
||||||
|
let _: () = crate::auth::authenticate_user(authid.user(), &password)
|
||||||
|
.map_err(|err| http_err!(UNAUTHORIZED, "{}", err))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// After authentication, verify that the to-be-modified user actually exists:
|
||||||
|
if must_exist && authid.user() != userid {
|
||||||
|
let (config, _digest) = crate::config::user::config()?;
|
||||||
|
|
||||||
|
if config
|
||||||
|
.lookup::<crate::config::user::User>("user", userid.as_str())
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
http_bail!(UNAUTHORIZED, "user '{}' does not exists.", userid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api]
|
||||||
|
/// A TFA entry type.
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
enum TfaType {
|
||||||
|
/// A TOTP entry type.
|
||||||
|
Totp,
|
||||||
|
/// A U2F token entry.
|
||||||
|
U2f,
|
||||||
|
/// A Webauthn token entry.
|
||||||
|
Webauthn,
|
||||||
|
/// Recovery tokens.
|
||||||
|
Recovery,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
type: { type: TfaType },
|
||||||
|
info: { type: TfaInfo },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// A TFA entry for a user.
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
#[serde(deny_unknown_fields)]
|
||||||
|
struct TypedTfaInfo {
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
pub ty: TfaType,
|
||||||
|
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub info: TfaInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn to_data(data: TfaUserData) -> Vec<TypedTfaInfo> {
|
||||||
|
let mut out = Vec::with_capacity(
|
||||||
|
data.totp.len()
|
||||||
|
+ data.u2f.len()
|
||||||
|
+ data.webauthn.len()
|
||||||
|
+ if data.recovery().is_some() { 1 } else { 0 },
|
||||||
|
);
|
||||||
|
if let Some(recovery) = data.recovery() {
|
||||||
|
out.push(TypedTfaInfo {
|
||||||
|
ty: TfaType::Recovery,
|
||||||
|
info: TfaInfo::recovery(recovery.created),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for entry in data.totp {
|
||||||
|
out.push(TypedTfaInfo {
|
||||||
|
ty: TfaType::Totp,
|
||||||
|
info: entry.info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
for entry in data.webauthn {
|
||||||
|
out.push(TypedTfaInfo {
|
||||||
|
ty: TfaType::Webauthn,
|
||||||
|
info: entry.info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
for entry in data.u2f {
|
||||||
|
out.push(TypedTfaInfo {
|
||||||
|
ty: TfaType::U2f,
|
||||||
|
info: entry.info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate through tuples of `(type, index, id)`.
|
||||||
|
fn tfa_id_iter(data: &TfaUserData) -> impl Iterator<Item = (TfaType, usize, &str)> {
|
||||||
|
data.totp
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, entry)| (TfaType::Totp, i, entry.info.id.as_str()))
|
||||||
|
.chain(
|
||||||
|
data.webauthn
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, entry)| (TfaType::Webauthn, i, entry.info.id.as_str())),
|
||||||
|
)
|
||||||
|
.chain(
|
||||||
|
data.u2f
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(i, entry)| (TfaType::U2f, i, entry.info.id.as_str())),
|
||||||
|
)
|
||||||
|
.chain(
|
||||||
|
data.recovery
|
||||||
|
.iter()
|
||||||
|
.map(|_| (TfaType::Recovery, 0, "recovery")),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: { userid: { type: Userid } },
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Add a TOTP secret to the user.
|
||||||
|
fn list_user_tfa(userid: Userid) -> Result<Vec<TypedTfaInfo>, Error> {
|
||||||
|
let _lock = crate::config::tfa::read_lock()?;
|
||||||
|
|
||||||
|
Ok(match crate::config::tfa::read()?.users.remove(&userid) {
|
||||||
|
Some(data) => to_data(data),
|
||||||
|
None => Vec::new(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: { type: Userid },
|
||||||
|
id: { description: "the tfa entry id" }
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get a single TFA entry.
|
||||||
|
fn get_tfa_entry(userid: Userid, id: String) -> Result<TypedTfaInfo, Error> {
|
||||||
|
let _lock = crate::config::tfa::read_lock()?;
|
||||||
|
|
||||||
|
if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
|
||||||
|
match {
|
||||||
|
// scope to prevent the temprary iter from borrowing across the whole match
|
||||||
|
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
|
||||||
|
entry.map(|(ty, index, _)| (ty, index))
|
||||||
|
} {
|
||||||
|
Some((TfaType::Recovery, _)) => {
|
||||||
|
if let Some(recovery) = user_data.recovery() {
|
||||||
|
return Ok(TypedTfaInfo {
|
||||||
|
ty: TfaType::Recovery,
|
||||||
|
info: TfaInfo::recovery(recovery.created),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some((TfaType::Totp, index)) => {
|
||||||
|
return Ok(TypedTfaInfo {
|
||||||
|
ty: TfaType::Totp,
|
||||||
|
// `into_iter().nth()` to *move* out of it
|
||||||
|
info: user_data.totp.into_iter().nth(index).unwrap().info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Some((TfaType::Webauthn, index)) => {
|
||||||
|
return Ok(TypedTfaInfo {
|
||||||
|
ty: TfaType::Webauthn,
|
||||||
|
info: user_data.webauthn.into_iter().nth(index).unwrap().info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Some((TfaType::U2f, index)) => {
|
||||||
|
return Ok(TypedTfaInfo {
|
||||||
|
ty: TfaType::U2f,
|
||||||
|
info: user_data.u2f.into_iter().nth(index).unwrap().info,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
None => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: { type: Userid },
|
||||||
|
id: {
|
||||||
|
description: "the tfa entry id",
|
||||||
|
},
|
||||||
|
password: {
|
||||||
|
schema: PASSWORD_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get a single TFA entry.
|
||||||
|
fn delete_tfa(
|
||||||
|
userid: Userid,
|
||||||
|
id: String,
|
||||||
|
password: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
tfa_update_auth(rpcenv, &userid, password, false)?;
|
||||||
|
|
||||||
|
let _lock = crate::config::tfa::write_lock()?;
|
||||||
|
|
||||||
|
let mut data = crate::config::tfa::read()?;
|
||||||
|
|
||||||
|
let user_data = data
|
||||||
|
.users
|
||||||
|
.get_mut(&userid)
|
||||||
|
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
|
||||||
|
|
||||||
|
match {
|
||||||
|
// scope to prevent the temprary iter from borrowing across the whole match
|
||||||
|
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
|
||||||
|
entry.map(|(ty, index, _)| (ty, index))
|
||||||
|
} {
|
||||||
|
Some((TfaType::Recovery, _)) => user_data.recovery = None,
|
||||||
|
Some((TfaType::Totp, index)) => drop(user_data.totp.remove(index)),
|
||||||
|
Some((TfaType::Webauthn, index)) => drop(user_data.webauthn.remove(index)),
|
||||||
|
Some((TfaType::U2f, index)) => drop(user_data.u2f.remove(index)),
|
||||||
|
None => http_bail!(NOT_FOUND, "no such tfa entry: {}/{}", userid, id),
|
||||||
|
}
|
||||||
|
|
||||||
|
if user_data.is_empty() {
|
||||||
|
data.users.remove(&userid);
|
||||||
|
}
|
||||||
|
|
||||||
|
crate::config::tfa::write(&data)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"userid": { type: Userid },
|
||||||
|
"entries": {
|
||||||
|
type: Array,
|
||||||
|
items: { type: TypedTfaInfo },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
#[serde(deny_unknown_fields)]
|
||||||
|
/// Over the API we only provide the descriptions for TFA data.
|
||||||
|
struct TfaUser {
|
||||||
|
/// The user this entry belongs to.
|
||||||
|
userid: Userid,
|
||||||
|
|
||||||
|
/// TFA entries.
|
||||||
|
entries: Vec<TypedTfaInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
description: "Returns all or just the logged-in user, depending on privileges.",
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "The list tuples of user and TFA entries.",
|
||||||
|
type: Array,
|
||||||
|
items: { type: TfaUser }
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List user TFA configuration.
|
||||||
|
fn list_tfa(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<TfaUser>, Error> {
|
||||||
|
let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let top_level_privs = user_info.lookup_privs(&authid, &["access", "users"]);
|
||||||
|
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
|
||||||
|
|
||||||
|
let _lock = crate::config::tfa::read_lock()?;
|
||||||
|
let tfa_data = crate::config::tfa::read()?.users;
|
||||||
|
|
||||||
|
let mut out = Vec::<TfaUser>::new();
|
||||||
|
if top_level_allowed {
|
||||||
|
for (user, data) in tfa_data {
|
||||||
|
out.push(TfaUser {
|
||||||
|
userid: user,
|
||||||
|
entries: to_data(data),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
} else if let Some(data) = { tfa_data }.remove(authid.user()) {
|
||||||
|
out.push(TfaUser {
|
||||||
|
userid: authid.into(),
|
||||||
|
entries: to_data(data),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(out)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
recovery: {
|
||||||
|
description: "A list of recovery codes as integers.",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: Integer,
|
||||||
|
description: "A one-time usable recovery code entry.",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// The result returned when adding TFA entries to a user.
|
||||||
|
#[derive(Default, Serialize)]
|
||||||
|
struct TfaUpdateInfo {
|
||||||
|
/// The id if a newly added TFA entry.
|
||||||
|
id: Option<String>,
|
||||||
|
|
||||||
|
/// When adding u2f entries, this contains a challenge the user must respond to in order to
|
||||||
|
/// finish the registration.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
challenge: Option<String>,
|
||||||
|
|
||||||
|
/// When adding recovery codes, this contains the list of codes to be displayed to the user
|
||||||
|
/// this one time.
|
||||||
|
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||||
|
recovery: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TfaUpdateInfo {
|
||||||
|
fn id(id: String) -> Self {
|
||||||
|
Self {
|
||||||
|
id: Some(id),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: { type: Userid },
|
||||||
|
description: {
|
||||||
|
description: "A description to distinguish multiple entries from one another",
|
||||||
|
type: String,
|
||||||
|
max_length: 255,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"type": { type: TfaType },
|
||||||
|
totp: {
|
||||||
|
description: "A totp URI.",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
value: {
|
||||||
|
description:
|
||||||
|
"The current value for the provided totp URI, or a Webauthn/U2F challenge response",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
challenge: {
|
||||||
|
description: "When responding to a u2f challenge: the original challenge string",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
password: {
|
||||||
|
schema: PASSWORD_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: { type: TfaUpdateInfo },
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Add a TFA entry to the user.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
fn add_tfa_entry(
|
||||||
|
userid: Userid,
|
||||||
|
description: Option<String>,
|
||||||
|
totp: Option<String>,
|
||||||
|
value: Option<String>,
|
||||||
|
challenge: Option<String>,
|
||||||
|
password: Option<String>,
|
||||||
|
r#type: TfaType,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<TfaUpdateInfo, Error> {
|
||||||
|
tfa_update_auth(rpcenv, &userid, password, true)?;
|
||||||
|
|
||||||
|
let need_description =
|
||||||
|
move || description.ok_or_else(|| format_err!("'description' is required for new entries"));
|
||||||
|
|
||||||
|
match r#type {
|
||||||
|
TfaType::Totp => match (totp, value) {
|
||||||
|
(Some(totp), Some(value)) => {
|
||||||
|
if challenge.is_some() {
|
||||||
|
bail!("'challenge' parameter is invalid for 'totp' entries");
|
||||||
|
}
|
||||||
|
let description = need_description()?;
|
||||||
|
|
||||||
|
let totp: Totp = totp.parse()?;
|
||||||
|
if totp
|
||||||
|
.verify(&value, std::time::SystemTime::now(), -1..=1)?
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
|
bail!("failed to verify TOTP challenge");
|
||||||
|
}
|
||||||
|
crate::config::tfa::add_totp(&userid, description, totp).map(TfaUpdateInfo::id)
|
||||||
|
}
|
||||||
|
_ => bail!("'totp' type requires both 'totp' and 'value' parameters"),
|
||||||
|
},
|
||||||
|
TfaType::Webauthn => {
|
||||||
|
if totp.is_some() {
|
||||||
|
bail!("'totp' parameter is invalid for 'totp' entries");
|
||||||
|
}
|
||||||
|
|
||||||
|
match challenge {
|
||||||
|
None => crate::config::tfa::add_webauthn_registration(&userid, need_description()?)
|
||||||
|
.map(|c| TfaUpdateInfo {
|
||||||
|
challenge: Some(c),
|
||||||
|
..Default::default()
|
||||||
|
}),
|
||||||
|
Some(challenge) => {
|
||||||
|
let value = value.ok_or_else(|| {
|
||||||
|
format_err!(
|
||||||
|
"missing 'value' parameter (webauthn challenge response missing)"
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
crate::config::tfa::finish_webauthn_registration(&userid, &challenge, &value)
|
||||||
|
.map(TfaUpdateInfo::id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
TfaType::U2f => {
|
||||||
|
if totp.is_some() {
|
||||||
|
bail!("'totp' parameter is invalid for 'totp' entries");
|
||||||
|
}
|
||||||
|
|
||||||
|
match challenge {
|
||||||
|
None => crate::config::tfa::add_u2f_registration(&userid, need_description()?).map(
|
||||||
|
|c| TfaUpdateInfo {
|
||||||
|
challenge: Some(c),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Some(challenge) => {
|
||||||
|
let value = value.ok_or_else(|| {
|
||||||
|
format_err!("missing 'value' parameter (u2f challenge response missing)")
|
||||||
|
})?;
|
||||||
|
crate::config::tfa::finish_u2f_registration(&userid, &challenge, &value)
|
||||||
|
.map(TfaUpdateInfo::id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
TfaType::Recovery => {
|
||||||
|
if totp.or(value).or(challenge).is_some() {
|
||||||
|
bail!("generating recovery tokens does not allow additional parameters");
|
||||||
|
}
|
||||||
|
|
||||||
|
let recovery = crate::config::tfa::add_recovery(&userid)?;
|
||||||
|
|
||||||
|
Ok(TfaUpdateInfo {
|
||||||
|
id: Some("recovery".to_string()),
|
||||||
|
recovery,
|
||||||
|
..Default::default()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: { type: Userid },
|
||||||
|
id: {
|
||||||
|
description: "the tfa entry id",
|
||||||
|
},
|
||||||
|
description: {
|
||||||
|
description: "A description to distinguish multiple entries from one another",
|
||||||
|
type: String,
|
||||||
|
max_length: 255,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
description: "Whether this entry should currently be enabled or disabled",
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
password: {
|
||||||
|
schema: PASSWORD_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update user's TFA entry description.
|
||||||
|
fn update_tfa_entry(
|
||||||
|
userid: Userid,
|
||||||
|
id: String,
|
||||||
|
description: Option<String>,
|
||||||
|
enable: Option<bool>,
|
||||||
|
password: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
tfa_update_auth(rpcenv, &userid, password, true)?;
|
||||||
|
|
||||||
|
let _lock = crate::config::tfa::write_lock()?;
|
||||||
|
|
||||||
|
let mut data = crate::config::tfa::read()?;
|
||||||
|
|
||||||
|
let mut entry = data
|
||||||
|
.users
|
||||||
|
.get_mut(&userid)
|
||||||
|
.and_then(|user| user.find_entry_mut(&id))
|
||||||
|
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
|
||||||
|
|
||||||
|
if let Some(description) = description {
|
||||||
|
entry.description = description;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(enable) = enable {
|
||||||
|
entry.enable = enable;
|
||||||
|
}
|
||||||
|
|
||||||
|
crate::config::tfa::write(&data)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_TFA)
|
||||||
|
.match_all("userid", &USER_ROUTER);
|
||||||
|
|
||||||
|
const USER_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_USER_TFA)
|
||||||
|
.post(&API_METHOD_ADD_TFA_ENTRY)
|
||||||
|
.match_all("id", &ITEM_ROUTER);
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_TFA_ENTRY)
|
||||||
|
.put(&API_METHOD_UPDATE_TFA_ENTRY)
|
||||||
|
.delete(&API_METHOD_DELETE_TFA);
|
@ -1,12 +1,18 @@
|
|||||||
use anyhow::{bail, Error};
|
//! User Management
|
||||||
use serde_json::Value;
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
|
use serde::{Serialize, Deserialize};
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
||||||
|
use proxmox::api::router::SubdirMap;
|
||||||
use proxmox::api::schema::{Schema, StringSchema};
|
use proxmox::api::schema::{Schema, StringSchema};
|
||||||
use proxmox::tools::fs::open_file_locked;
|
use proxmox::tools::fs::open_file_locked;
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::config::user;
|
use crate::config::user;
|
||||||
|
use crate::config::token_shadow;
|
||||||
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
|
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY};
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
|
|
||||||
@ -16,44 +22,160 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
|
|||||||
.max_length(64)
|
.max_length(64)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: user::ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: user::EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
firstname: {
|
||||||
|
optional: true,
|
||||||
|
schema: user::FIRST_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
lastname: {
|
||||||
|
schema: user::LAST_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
email: {
|
||||||
|
schema: user::EMAIL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
tokens: {
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
description: "List of user's API tokens.",
|
||||||
|
items: {
|
||||||
|
type: user::ApiToken
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
/// User properties with added list of ApiTokens
|
||||||
|
pub struct UserWithTokens {
|
||||||
|
pub userid: Userid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub firstname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub lastname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub email: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty", default)]
|
||||||
|
pub tokens: Vec<user::ApiToken>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserWithTokens {
|
||||||
|
fn new(user: user::User) -> Self {
|
||||||
|
Self {
|
||||||
|
userid: user.userid,
|
||||||
|
comment: user.comment,
|
||||||
|
enable: user.enable,
|
||||||
|
expire: user.expire,
|
||||||
|
firstname: user.firstname,
|
||||||
|
lastname: user.lastname,
|
||||||
|
email: user.email,
|
||||||
|
tokens: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {},
|
properties: {
|
||||||
|
include_tokens: {
|
||||||
|
type: bool,
|
||||||
|
description: "Include user's API tokens in returned list.",
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
description: "List users (with config digest).",
|
description: "List users (with config digest).",
|
||||||
type: Array,
|
type: Array,
|
||||||
items: { type: user::User },
|
items: { type: UserWithTokens },
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Anybody,
|
permission: &Permission::Anybody,
|
||||||
description: "Returns all or just the logged-in user, depending on privileges.",
|
description: "Returns all or just the logged-in user (/API token owner), depending on privileges.",
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// List users
|
/// List users
|
||||||
pub fn list_users(
|
pub fn list_users(
|
||||||
_param: Value,
|
include_tokens: bool,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<user::User>, Error> {
|
) -> Result<Vec<UserWithTokens>, Error> {
|
||||||
|
|
||||||
let (config, digest) = user::config()?;
|
let (config, digest) = user::config()?;
|
||||||
|
|
||||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
let auth_id: Authid = rpcenv
|
||||||
|
.get_auth_id()
|
||||||
|
.ok_or_else(|| format_err!("no authid available"))?
|
||||||
|
.parse()?;
|
||||||
|
|
||||||
|
let userid = auth_id.user();
|
||||||
|
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
|
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
|
||||||
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
|
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
|
||||||
|
|
||||||
let filter_by_privs = |user: &user::User| {
|
let filter_by_privs = |user: &user::User| {
|
||||||
top_level_allowed || user.userid == userid
|
top_level_allowed || user.userid == *userid
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
let list:Vec<user::User> = config.convert_to_typed_array("user")?;
|
let list:Vec<user::User> = config.convert_to_typed_array("user")?;
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
Ok(list.into_iter().filter(filter_by_privs).collect())
|
let iter = list.into_iter().filter(filter_by_privs);
|
||||||
|
let list = if include_tokens {
|
||||||
|
let tokens: Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
|
||||||
|
let mut user_to_tokens = tokens
|
||||||
|
.into_iter()
|
||||||
|
.fold(
|
||||||
|
HashMap::new(),
|
||||||
|
|mut map: HashMap<Userid, Vec<user::ApiToken>>, token: user::ApiToken| {
|
||||||
|
if token.tokenid.is_token() {
|
||||||
|
map
|
||||||
|
.entry(token.tokenid.user().clone())
|
||||||
|
.or_default()
|
||||||
|
.push(token);
|
||||||
|
}
|
||||||
|
map
|
||||||
|
});
|
||||||
|
iter
|
||||||
|
.map(|user: user::User| {
|
||||||
|
let mut user = UserWithTokens::new(user);
|
||||||
|
user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
|
||||||
|
user
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
} else {
|
||||||
|
iter.map(UserWithTokens::new)
|
||||||
|
.collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -98,7 +220,11 @@ pub fn list_users(
|
|||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Create new user.
|
/// Create new user.
|
||||||
pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error> {
|
pub fn create_user(
|
||||||
|
password: Option<String>,
|
||||||
|
param: Value,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
@ -106,17 +232,25 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
|
|||||||
|
|
||||||
let (mut config, _digest) = user::config()?;
|
let (mut config, _digest) = user::config()?;
|
||||||
|
|
||||||
if let Some(_) = config.sections.get(user.userid.as_str()) {
|
if config.sections.get(user.userid.as_str()).is_some() {
|
||||||
bail!("user '{}' already exists.", user.userid);
|
bail!("user '{}' already exists.", user.userid);
|
||||||
}
|
}
|
||||||
|
|
||||||
let authenticator = crate::auth::lookup_authenticator(&user.userid.realm())?;
|
|
||||||
|
|
||||||
config.set_data(user.userid.as_str(), "user", &user)?;
|
config.set_data(user.userid.as_str(), "user", &user)?;
|
||||||
|
|
||||||
|
let realm = user.userid.realm();
|
||||||
|
|
||||||
|
// Fails if realm does not exist!
|
||||||
|
let authenticator = crate::auth::lookup_authenticator(realm)?;
|
||||||
|
|
||||||
user::save_config(&config)?;
|
user::save_config(&config)?;
|
||||||
|
|
||||||
if let Some(password) = password {
|
if let Some(password) = password {
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
if realm == "pam" && !user_info.is_superuser(¤t_auth_id) {
|
||||||
|
bail!("only superuser can edit pam credentials!");
|
||||||
|
}
|
||||||
authenticator.store_password(user.userid.name(), &password)?;
|
authenticator.store_password(user.userid.name(), &password)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -131,10 +265,7 @@ pub fn create_user(password: Option<String>, param: Value) -> Result<(), Error>
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: { type: user::User },
|
||||||
description: "The user configuration (with config digest).",
|
|
||||||
type: user::User,
|
|
||||||
},
|
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Or(&[
|
permission: &Permission::Or(&[
|
||||||
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
|
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
|
||||||
@ -150,6 +281,21 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
|
|||||||
Ok(user)
|
Ok(user)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete the comment property.
|
||||||
|
comment,
|
||||||
|
/// Delete the firstname property.
|
||||||
|
firstname,
|
||||||
|
/// Delete the lastname property.
|
||||||
|
lastname,
|
||||||
|
/// Delete the email property.
|
||||||
|
email,
|
||||||
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
protected: true,
|
protected: true,
|
||||||
input: {
|
input: {
|
||||||
@ -185,6 +331,14 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
|
|||||||
schema: user::EMAIL_SCHEMA,
|
schema: user::EMAIL_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
}
|
||||||
|
},
|
||||||
digest: {
|
digest: {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
@ -199,6 +353,7 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
|
|||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Update user configuration.
|
/// Update user configuration.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn update_user(
|
pub fn update_user(
|
||||||
userid: Userid,
|
userid: Userid,
|
||||||
comment: Option<String>,
|
comment: Option<String>,
|
||||||
@ -208,7 +363,9 @@ pub fn update_user(
|
|||||||
firstname: Option<String>,
|
firstname: Option<String>,
|
||||||
lastname: Option<String>,
|
lastname: Option<String>,
|
||||||
email: Option<String>,
|
email: Option<String>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
digest: Option<String>,
|
digest: Option<String>,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
@ -222,6 +379,17 @@ pub fn update_user(
|
|||||||
|
|
||||||
let mut data: user::User = config.lookup("user", userid.as_str())?;
|
let mut data: user::User = config.lookup("user", userid.as_str())?;
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::comment => data.comment = None,
|
||||||
|
DeletableProperty::firstname => data.firstname = None,
|
||||||
|
DeletableProperty::lastname => data.lastname = None,
|
||||||
|
DeletableProperty::email => data.email = None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(comment) = comment {
|
if let Some(comment) = comment {
|
||||||
let comment = comment.trim().to_string();
|
let comment = comment.trim().to_string();
|
||||||
if comment.is_empty() {
|
if comment.is_empty() {
|
||||||
@ -240,6 +408,13 @@ pub fn update_user(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Some(password) = password {
|
if let Some(password) = password {
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
let current_auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let self_service = current_auth_id.user() == &userid;
|
||||||
|
let target_realm = userid.realm();
|
||||||
|
if !self_service && target_realm == "pam" && !user_info.is_superuser(¤t_auth_id) {
|
||||||
|
bail!("only superuser can edit pam credentials!");
|
||||||
|
}
|
||||||
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
|
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
|
||||||
authenticator.store_password(userid.name(), &password)?;
|
authenticator.store_password(userid.name(), &password)?;
|
||||||
}
|
}
|
||||||
@ -285,6 +460,7 @@ pub fn update_user(
|
|||||||
/// Remove a user from the configuration file.
|
/// Remove a user from the configuration file.
|
||||||
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
|
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _tfa_lock = crate::config::tfa::write_lock()?;
|
||||||
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
let (mut config, expected_digest) = user::config()?;
|
let (mut config, expected_digest) = user::config()?;
|
||||||
@ -301,15 +477,353 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
|
|||||||
|
|
||||||
user::save_config(&config)?;
|
user::save_config(&config)?;
|
||||||
|
|
||||||
|
match crate::config::tfa::read().and_then(|mut cfg| {
|
||||||
|
let _: bool = cfg.remove_user(&userid);
|
||||||
|
crate::config::tfa::write(&cfg)
|
||||||
|
}) {
|
||||||
|
Ok(()) => (),
|
||||||
|
Err(err) => {
|
||||||
|
eprintln!(
|
||||||
|
"error updating TFA config after deleting user {:?}: {}",
|
||||||
|
userid, err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
const ITEM_ROUTER: Router = Router::new()
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
tokenname: {
|
||||||
|
type: Tokenname,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: { type: user::ApiToken },
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Read user's API token metadata
|
||||||
|
pub fn read_token(
|
||||||
|
userid: Userid,
|
||||||
|
tokenname: Tokenname,
|
||||||
|
_info: &ApiMethod,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<user::ApiToken, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = user::config()?;
|
||||||
|
|
||||||
|
let tokenid = Authid::from((userid, Some(tokenname)));
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
config.lookup("token", &tokenid.to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
tokenname: {
|
||||||
|
type: Tokenname,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
schema: user::ENABLE_USER_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
schema: user::EXPIRE_USER_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "API token identifier + generated secret.",
|
||||||
|
properties: {
|
||||||
|
value: {
|
||||||
|
type: String,
|
||||||
|
description: "The API token secret",
|
||||||
|
},
|
||||||
|
tokenid: {
|
||||||
|
type: String,
|
||||||
|
description: "The API token identifier",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Generate a new API token with given metadata
|
||||||
|
pub fn generate_token(
|
||||||
|
userid: Userid,
|
||||||
|
tokenname: Tokenname,
|
||||||
|
comment: Option<String>,
|
||||||
|
enable: Option<bool>,
|
||||||
|
expire: Option<i64>,
|
||||||
|
digest: Option<String>,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = user::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
|
||||||
|
let tokenid_string = tokenid.to_string();
|
||||||
|
|
||||||
|
if config.sections.get(&tokenid_string).is_some() {
|
||||||
|
bail!("token '{}' for user '{}' already exists.", tokenname.as_str(), userid);
|
||||||
|
}
|
||||||
|
|
||||||
|
let secret = format!("{:x}", proxmox::tools::uuid::Uuid::generate());
|
||||||
|
token_shadow::set_secret(&tokenid, &secret)?;
|
||||||
|
|
||||||
|
let token = user::ApiToken {
|
||||||
|
tokenid,
|
||||||
|
comment,
|
||||||
|
enable,
|
||||||
|
expire,
|
||||||
|
};
|
||||||
|
|
||||||
|
config.set_data(&tokenid_string, "token", &token)?;
|
||||||
|
|
||||||
|
user::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(json!({
|
||||||
|
"tokenid": tokenid_string,
|
||||||
|
"value": secret
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
tokenname: {
|
||||||
|
type: Tokenname,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
schema: user::ENABLE_USER_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
schema: user::EXPIRE_USER_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update user's API token metadata
|
||||||
|
pub fn update_token(
|
||||||
|
userid: Userid,
|
||||||
|
tokenname: Tokenname,
|
||||||
|
comment: Option<String>,
|
||||||
|
enable: Option<bool>,
|
||||||
|
expire: Option<i64>,
|
||||||
|
digest: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = user::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let tokenid = Authid::from((userid, Some(tokenname)));
|
||||||
|
let tokenid_string = tokenid.to_string();
|
||||||
|
|
||||||
|
let mut data: user::ApiToken = config.lookup("token", &tokenid_string)?;
|
||||||
|
|
||||||
|
if let Some(comment) = comment {
|
||||||
|
let comment = comment.trim().to_string();
|
||||||
|
if comment.is_empty() {
|
||||||
|
data.comment = None;
|
||||||
|
} else {
|
||||||
|
data.comment = Some(comment);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(enable) = enable {
|
||||||
|
data.enable = if enable { None } else { Some(false) };
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(expire) = expire {
|
||||||
|
data.expire = if expire > 0 { Some(expire) } else { None };
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&tokenid_string, "token", &data)?;
|
||||||
|
|
||||||
|
user::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
tokenname: {
|
||||||
|
type: Tokenname,
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete a user's API token
|
||||||
|
pub fn delete_token(
|
||||||
|
userid: Userid,
|
||||||
|
tokenname: Tokenname,
|
||||||
|
digest: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = open_file_locked(user::USER_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = user::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let tokenid = Authid::from((userid.clone(), Some(tokenname.clone())));
|
||||||
|
let tokenid_string = tokenid.to_string();
|
||||||
|
|
||||||
|
match config.sections.get(&tokenid_string) {
|
||||||
|
Some(_) => { config.sections.remove(&tokenid_string); },
|
||||||
|
None => bail!("token '{}' of user '{}' does not exist.", tokenname.as_str(), userid),
|
||||||
|
}
|
||||||
|
|
||||||
|
token_shadow::delete_secret(&tokenid)?;
|
||||||
|
|
||||||
|
user::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "List user's API tokens (with config digest).",
|
||||||
|
type: Array,
|
||||||
|
items: { type: user::ApiToken },
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Or(&[
|
||||||
|
&Permission::Privilege(&["access", "users"], PRIV_SYS_AUDIT, false),
|
||||||
|
&Permission::UserParam("userid"),
|
||||||
|
]),
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List user's API tokens
|
||||||
|
pub fn list_tokens(
|
||||||
|
userid: Userid,
|
||||||
|
_info: &ApiMethod,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<user::ApiToken>, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = user::config()?;
|
||||||
|
|
||||||
|
let list:Vec<user::ApiToken> = config.convert_to_typed_array("token")?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
let filter_by_owner = |token: &user::ApiToken| {
|
||||||
|
if token.tokenid.is_token() {
|
||||||
|
token.tokenid.user() == &userid
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(list.into_iter().filter(filter_by_owner).collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
const TOKEN_ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_READ_TOKEN)
|
||||||
|
.put(&API_METHOD_UPDATE_TOKEN)
|
||||||
|
.post(&API_METHOD_GENERATE_TOKEN)
|
||||||
|
.delete(&API_METHOD_DELETE_TOKEN);
|
||||||
|
|
||||||
|
const TOKEN_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_TOKENS)
|
||||||
|
.match_all("tokenname", &TOKEN_ITEM_ROUTER);
|
||||||
|
|
||||||
|
const USER_SUBDIRS: SubdirMap = &[
|
||||||
|
("token", &TOKEN_ROUTER),
|
||||||
|
];
|
||||||
|
|
||||||
|
const USER_ROUTER: Router = Router::new()
|
||||||
.get(&API_METHOD_READ_USER)
|
.get(&API_METHOD_READ_USER)
|
||||||
.put(&API_METHOD_UPDATE_USER)
|
.put(&API_METHOD_UPDATE_USER)
|
||||||
.delete(&API_METHOD_DELETE_USER);
|
.delete(&API_METHOD_DELETE_USER)
|
||||||
|
.subdirs(USER_SUBDIRS);
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
.get(&API_METHOD_LIST_USERS)
|
.get(&API_METHOD_LIST_USERS)
|
||||||
.post(&API_METHOD_CREATE_USER)
|
.post(&API_METHOD_CREATE_USER)
|
||||||
.match_all("userid", &ITEM_ROUTER);
|
.match_all("userid", &USER_ROUTER);
|
||||||
|
@ -1,12 +1,16 @@
|
|||||||
|
//! Backup Server Administration
|
||||||
|
|
||||||
use proxmox::api::router::{Router, SubdirMap};
|
use proxmox::api::router::{Router, SubdirMap};
|
||||||
use proxmox::list_subdirs_api_method;
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
pub mod datastore;
|
pub mod datastore;
|
||||||
pub mod sync;
|
pub mod sync;
|
||||||
|
pub mod verify;
|
||||||
|
|
||||||
const SUBDIRS: SubdirMap = &[
|
const SUBDIRS: SubdirMap = &[
|
||||||
("datastore", &datastore::ROUTER),
|
("datastore", &datastore::ROUTER),
|
||||||
("sync", &sync::ROUTER)
|
("sync", &sync::ROUTER),
|
||||||
|
("verify", &verify::ROUTER)
|
||||||
];
|
];
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
|
@ -1,37 +1,68 @@
|
|||||||
use anyhow::{format_err, Error};
|
//! Datastore Syncronization Job Management
|
||||||
|
|
||||||
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
|
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
||||||
use proxmox::api::router::SubdirMap;
|
use proxmox::api::router::SubdirMap;
|
||||||
use proxmox::{list_subdirs_api_method, sortable};
|
use proxmox::{list_subdirs_api_method, sortable};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::api2::pull::do_sync_job;
|
use crate::api2::pull::do_sync_job;
|
||||||
|
use crate::api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access};
|
||||||
|
|
||||||
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
|
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
|
||||||
use crate::server::UPID;
|
use crate::server::UPID;
|
||||||
use crate::config::jobstate::{Job, JobState};
|
use crate::server::jobstate::{Job, JobState};
|
||||||
use crate::tools::systemd::time::{
|
use crate::tools::systemd::time::{
|
||||||
parse_calendar_event, compute_next_event};
|
parse_calendar_event, compute_next_event};
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
properties: {},
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
description: "List configured jobs and their status.",
|
description: "List configured jobs and their status.",
|
||||||
type: Array,
|
type: Array,
|
||||||
items: { type: sync::SyncJobStatus },
|
items: { type: sync::SyncJobStatus },
|
||||||
},
|
},
|
||||||
|
access: {
|
||||||
|
description: "Limited to sync jobs where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// List all sync jobs
|
/// List all sync jobs
|
||||||
pub fn list_sync_jobs(
|
pub fn list_sync_jobs(
|
||||||
|
store: Option<String>,
|
||||||
_param: Value,
|
_param: Value,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<SyncJobStatus>, Error> {
|
) -> Result<Vec<SyncJobStatus>, Error> {
|
||||||
|
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, digest) = sync::config()?;
|
let (config, digest) = sync::config()?;
|
||||||
|
|
||||||
let mut list: Vec<SyncJobStatus> = config.convert_to_typed_array("sync")?;
|
let mut list: Vec<SyncJobStatus> = config
|
||||||
|
.convert_to_typed_array("sync")?
|
||||||
|
.into_iter()
|
||||||
|
.filter(|job: &SyncJobStatus| {
|
||||||
|
if let Some(store) = &store {
|
||||||
|
&job.store == store
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.filter(|job: &SyncJobStatus| {
|
||||||
|
let as_config: SyncJobConfig = job.into();
|
||||||
|
check_sync_job_read_access(&user_info, &auth_id, &as_config)
|
||||||
|
}).collect();
|
||||||
|
|
||||||
for job in &mut list {
|
for job in &mut list {
|
||||||
let last_state = JobState::load("syncjob", &job.id)
|
let last_state = JobState::load("syncjob", &job.id)
|
||||||
@ -52,13 +83,13 @@ pub fn list_sync_jobs(
|
|||||||
job.last_run_state = state;
|
job.last_run_state = state;
|
||||||
job.last_run_endtime = endtime;
|
job.last_run_endtime = endtime;
|
||||||
|
|
||||||
let last = job.last_run_endtime.unwrap_or_else(|| starttime);
|
let last = job.last_run_endtime.unwrap_or(starttime);
|
||||||
|
|
||||||
job.next_run = (|| -> Option<i64> {
|
job.next_run = (|| -> Option<i64> {
|
||||||
let schedule = job.schedule.as_ref()?;
|
let schedule = job.schedule.as_ref()?;
|
||||||
let event = parse_calendar_event(&schedule).ok()?;
|
let event = parse_calendar_event(&schedule).ok()?;
|
||||||
// ignore errors
|
// ignore errors
|
||||||
compute_next_event(&event, last, false).unwrap_or_else(|_| None)
|
compute_next_event(&event, last, false).unwrap_or(None)
|
||||||
})();
|
})();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,23 +105,31 @@ pub fn list_sync_jobs(
|
|||||||
schema: JOB_ID_SCHEMA,
|
schema: JOB_ID_SCHEMA,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
|
access: {
|
||||||
|
description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
)]
|
)]
|
||||||
/// Runs the sync jobs manually.
|
/// Runs the sync jobs manually.
|
||||||
fn run_sync_job(
|
pub fn run_sync_job(
|
||||||
id: String,
|
id: String,
|
||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<String, Error> {
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, _digest) = sync::config()?;
|
let (config, _digest) = sync::config()?;
|
||||||
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
|
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
|
||||||
|
|
||||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
|
||||||
|
bail!("permission check failed");
|
||||||
|
}
|
||||||
|
|
||||||
let job = Job::new("syncjob", &id)?;
|
let job = Job::new("syncjob", &id)?;
|
||||||
|
|
||||||
let upid_str = do_sync_job(job, sync_job, &userid, None)?;
|
let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
|
||||||
|
|
||||||
Ok(upid_str)
|
Ok(upid_str)
|
||||||
}
|
}
|
||||||
|
149
src/api2/admin/verify.rs
Normal file
@ -0,0 +1,149 @@
|
|||||||
|
//! Datastore Verify Job Management
|
||||||
|
|
||||||
|
use anyhow::{format_err, Error};
|
||||||
|
|
||||||
|
use proxmox::api::router::SubdirMap;
|
||||||
|
use proxmox::{list_subdirs_api_method, sortable};
|
||||||
|
use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment};
|
||||||
|
|
||||||
|
use crate::api2::types::*;
|
||||||
|
use crate::server::do_verification_job;
|
||||||
|
use crate::server::jobstate::{Job, JobState};
|
||||||
|
use crate::config::acl::{
|
||||||
|
PRIV_DATASTORE_AUDIT,
|
||||||
|
PRIV_DATASTORE_VERIFY,
|
||||||
|
};
|
||||||
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
|
use crate::config::verify;
|
||||||
|
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
|
||||||
|
use serde_json::Value;
|
||||||
|
use crate::tools::systemd::time::{parse_calendar_event, compute_next_event};
|
||||||
|
use crate::server::UPID;
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "List configured jobs and their status (filtered by access)",
|
||||||
|
type: Array,
|
||||||
|
items: { type: verify::VerificationJobStatus },
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
description: "Requires Datastore.Audit or Datastore.Verify on datastore.",
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List all verification jobs
|
||||||
|
pub fn list_verification_jobs(
|
||||||
|
store: Option<String>,
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<VerificationJobStatus>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
|
||||||
|
|
||||||
|
let (config, digest) = verify::config()?;
|
||||||
|
|
||||||
|
let mut list: Vec<VerificationJobStatus> = config
|
||||||
|
.convert_to_typed_array("verification")?
|
||||||
|
.into_iter()
|
||||||
|
.filter(|job: &VerificationJobStatus| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
|
||||||
|
if privs & required_privs == 0 {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(store) = &store {
|
||||||
|
&job.store == store
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}).collect();
|
||||||
|
|
||||||
|
for job in &mut list {
|
||||||
|
let last_state = JobState::load("verificationjob", &job.id)
|
||||||
|
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
||||||
|
|
||||||
|
let (upid, endtime, state, starttime) = match last_state {
|
||||||
|
JobState::Created { time } => (None, None, None, time),
|
||||||
|
JobState::Started { upid } => {
|
||||||
|
let parsed_upid: UPID = upid.parse()?;
|
||||||
|
(Some(upid), None, None, parsed_upid.starttime)
|
||||||
|
},
|
||||||
|
JobState::Finished { upid, state } => {
|
||||||
|
let parsed_upid: UPID = upid.parse()?;
|
||||||
|
(Some(upid), Some(state.endtime()), Some(state.to_string()), parsed_upid.starttime)
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
job.last_run_upid = upid;
|
||||||
|
job.last_run_state = state;
|
||||||
|
job.last_run_endtime = endtime;
|
||||||
|
|
||||||
|
let last = job.last_run_endtime.unwrap_or(starttime);
|
||||||
|
|
||||||
|
job.next_run = (|| -> Option<i64> {
|
||||||
|
let schedule = job.schedule.as_ref()?;
|
||||||
|
let event = parse_calendar_event(&schedule).ok()?;
|
||||||
|
// ignore errors
|
||||||
|
compute_next_event(&event, last, false).unwrap_or(None)
|
||||||
|
})();
|
||||||
|
}
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
description: "Requires Datastore.Verify on job's datastore.",
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Runs a verification job manually.
|
||||||
|
pub fn run_verification_job(
|
||||||
|
id: String,
|
||||||
|
_info: &ApiMethod,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<String, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
|
let (config, _digest) = verify::config()?;
|
||||||
|
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
|
||||||
|
|
||||||
|
user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
|
||||||
|
|
||||||
|
let job = Job::new("verificationjob", &id)?;
|
||||||
|
|
||||||
|
let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
|
||||||
|
|
||||||
|
Ok(upid_str)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[sortable]
|
||||||
|
const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
|
||||||
|
|
||||||
|
const VERIFICATION_INFO_ROUTER: Router = Router::new()
|
||||||
|
.get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
|
||||||
|
.subdirs(VERIFICATION_INFO_SUBDIRS);
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_VERIFICATION_JOBS)
|
||||||
|
.match_all("id", &VERIFICATION_INFO_ROUTER);
|
@ -1,8 +1,10 @@
|
|||||||
|
//! Backup protocol (HTTP2 upgrade)
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::*;
|
use futures::*;
|
||||||
use hyper::header::{HeaderValue, UPGRADE};
|
use hyper::header::{HeaderValue, UPGRADE};
|
||||||
use hyper::http::request::Parts;
|
use hyper::http::request::Parts;
|
||||||
use hyper::{Body, Response, StatusCode};
|
use hyper::{Body, Response, Request, StatusCode};
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
|
||||||
use proxmox::{sortable, identity, list_subdirs_api_method};
|
use proxmox::{sortable, identity, list_subdirs_api_method};
|
||||||
@ -16,7 +18,7 @@ use crate::backup::*;
|
|||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
use crate::config::acl::PRIV_DATASTORE_BACKUP;
|
use crate::config::acl::PRIV_DATASTORE_BACKUP;
|
||||||
use crate::config::cached_user_info::CachedUserInfo;
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
use crate::tools::fs::lock_dir_noblock;
|
use crate::tools::fs::lock_dir_noblock_shared;
|
||||||
|
|
||||||
mod environment;
|
mod environment;
|
||||||
use environment::*;
|
use environment::*;
|
||||||
@ -59,12 +61,12 @@ async move {
|
|||||||
let debug = param["debug"].as_bool().unwrap_or(false);
|
let debug = param["debug"].as_bool().unwrap_or(false);
|
||||||
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
|
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
|
||||||
|
|
||||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
|
||||||
let store = tools::required_string_param(¶m, "store")?.to_owned();
|
let store = tools::required_string_param(¶m, "store")?.to_owned();
|
||||||
|
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
|
user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
|
||||||
|
|
||||||
let datastore = DataStore::lookup_datastore(&store)?;
|
let datastore = DataStore::lookup_datastore(&store)?;
|
||||||
|
|
||||||
@ -86,7 +88,7 @@ async move {
|
|||||||
bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
|
bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
|
||||||
}
|
}
|
||||||
|
|
||||||
let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
|
let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
|
||||||
|
|
||||||
let env_type = rpcenv.env_type();
|
let env_type = rpcenv.env_type();
|
||||||
|
|
||||||
@ -105,12 +107,15 @@ async move {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// lock backup group to only allow one backup per group at a time
|
// lock backup group to only allow one backup per group at a time
|
||||||
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
|
let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
|
||||||
|
|
||||||
// permission check
|
// permission check
|
||||||
if owner != userid && worker_type != "benchmark" {
|
let correct_owner = owner == auth_id
|
||||||
|
|| (owner.is_token()
|
||||||
|
&& Authid::from(owner.user().clone()) == auth_id);
|
||||||
|
if !correct_owner && worker_type != "benchmark" {
|
||||||
// only the owner is allowed to create additional snapshots
|
// only the owner is allowed to create additional snapshots
|
||||||
bail!("backup owner check failed ({} != {})", userid, owner);
|
bail!("backup owner check failed ({} != {})", auth_id, owner);
|
||||||
}
|
}
|
||||||
|
|
||||||
let last_backup = {
|
let last_backup = {
|
||||||
@ -135,7 +140,7 @@ async move {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
|
let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
|
||||||
|
|
||||||
let _last_guard = if let Some(last) = &last_backup {
|
let _last_guard = if let Some(last) = &last_backup {
|
||||||
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
|
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
|
||||||
@ -144,18 +149,18 @@ async move {
|
|||||||
|
|
||||||
// lock last snapshot to prevent forgetting/pruning it during backup
|
// lock last snapshot to prevent forgetting/pruning it during backup
|
||||||
let full_path = datastore.snapshot_path(&last.backup_dir);
|
let full_path = datastore.snapshot_path(&last.backup_dir);
|
||||||
Some(lock_dir_noblock(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
|
Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
|
let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
|
||||||
if !is_new { bail!("backup directory already exists."); }
|
if !is_new { bail!("backup directory already exists."); }
|
||||||
|
|
||||||
|
|
||||||
WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
|
WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
|
||||||
let mut env = BackupEnvironment::new(
|
let mut env = BackupEnvironment::new(
|
||||||
env_type, userid, worker.clone(), datastore, backup_dir);
|
env_type, auth_id, worker.clone(), datastore, backup_dir);
|
||||||
|
|
||||||
env.debug = debug;
|
env.debug = debug;
|
||||||
env.last_backup = last_backup;
|
env.last_backup = last_backup;
|
||||||
@ -168,8 +173,7 @@ async move {
|
|||||||
|
|
||||||
let env2 = env.clone();
|
let env2 = env.clone();
|
||||||
|
|
||||||
let mut req_fut = req_body
|
let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
|
||||||
.on_upgrade()
|
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(move |conn| {
|
.and_then(move |conn| {
|
||||||
env2.debug("protocol upgrade done");
|
env2.debug("protocol upgrade done");
|
||||||
@ -182,8 +186,22 @@ async move {
|
|||||||
http.http2_initial_connection_window_size(window_size);
|
http.http2_initial_connection_window_size(window_size);
|
||||||
http.http2_max_frame_size(4*1024*1024);
|
http.http2_max_frame_size(4*1024*1024);
|
||||||
|
|
||||||
|
let env3 = env2.clone();
|
||||||
http.serve_connection(conn, service)
|
http.serve_connection(conn, service)
|
||||||
.map_err(Error::from)
|
.map(move |result| {
|
||||||
|
match result {
|
||||||
|
Err(err) => {
|
||||||
|
// Avoid Transport endpoint is not connected (os error 107)
|
||||||
|
// fixme: find a better way to test for that error
|
||||||
|
if err.to_string().starts_with("connection error") && env3.finished() {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(Error::from(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(()) => Ok(()),
|
||||||
|
}
|
||||||
|
})
|
||||||
});
|
});
|
||||||
let mut abort_future = abort_future
|
let mut abort_future = abort_future
|
||||||
.map(|_| Err(format_err!("task aborted")));
|
.map(|_| Err(format_err!("task aborted")));
|
||||||
@ -191,7 +209,7 @@ async move {
|
|||||||
async move {
|
async move {
|
||||||
// keep flock until task ends
|
// keep flock until task ends
|
||||||
let _group_guard = _group_guard;
|
let _group_guard = _group_guard;
|
||||||
let _snap_guard = _snap_guard;
|
let snap_guard = snap_guard;
|
||||||
let _last_guard = _last_guard;
|
let _last_guard = _last_guard;
|
||||||
|
|
||||||
let res = select!{
|
let res = select!{
|
||||||
@ -203,20 +221,32 @@ async move {
|
|||||||
tools::runtime::block_in_place(|| env.remove_backup())?;
|
tools::runtime::block_in_place(|| env.remove_backup())?;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let verify = |env: BackupEnvironment| {
|
||||||
|
if let Err(err) = env.verify_after_complete(snap_guard) {
|
||||||
|
env.log(format!(
|
||||||
|
"backup finished, but starting the requested verify task failed: {}",
|
||||||
|
err
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
match (res, env.ensure_finished()) {
|
match (res, env.ensure_finished()) {
|
||||||
(Ok(_), Ok(())) => {
|
(Ok(_), Ok(())) => {
|
||||||
env.log("backup finished successfully");
|
env.log("backup finished successfully");
|
||||||
|
verify(env);
|
||||||
Ok(())
|
Ok(())
|
||||||
},
|
},
|
||||||
(Err(err), Ok(())) => {
|
(Err(err), Ok(())) => {
|
||||||
// ignore errors after finish
|
// ignore errors after finish
|
||||||
env.log(format!("backup had errors but finished: {}", err));
|
env.log(format!("backup had errors but finished: {}", err));
|
||||||
|
verify(env);
|
||||||
Ok(())
|
Ok(())
|
||||||
},
|
},
|
||||||
(Ok(_), Err(err)) => {
|
(Ok(_), Err(err)) => {
|
||||||
env.log(format!("backup ended and finish failed: {}", err));
|
env.log(format!("backup ended and finish failed: {}", err));
|
||||||
env.log("removing unfinished backup");
|
env.log("removing unfinished backup");
|
||||||
env.remove_backup()?;
|
tools::runtime::block_in_place(|| env.remove_backup())?;
|
||||||
Err(err)
|
Err(err)
|
||||||
},
|
},
|
||||||
(Err(err), Err(_)) => {
|
(Err(err), Err(_)) => {
|
||||||
@ -282,6 +312,10 @@ pub const BACKUP_API_SUBDIRS: SubdirMap = &[
|
|||||||
"previous", &Router::new()
|
"previous", &Router::new()
|
||||||
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
|
.download(&API_METHOD_DOWNLOAD_PREVIOUS)
|
||||||
),
|
),
|
||||||
|
(
|
||||||
|
"previous_backup_time", &Router::new()
|
||||||
|
.get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
|
||||||
|
),
|
||||||
(
|
(
|
||||||
"speedtest", &Router::new()
|
"speedtest", &Router::new()
|
||||||
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
|
.upload(&API_METHOD_UPLOAD_SPEEDTEST)
|
||||||
@ -665,6 +699,28 @@ fn finish_backup (
|
|||||||
Ok(Value::Null)
|
Ok(Value::Null)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[sortable]
|
||||||
|
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
|
||||||
|
&ApiHandler::Sync(&get_previous_backup_time),
|
||||||
|
&ObjectSchema::new(
|
||||||
|
"Get previous backup time.",
|
||||||
|
&[],
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
fn get_previous_backup_time(
|
||||||
|
_param: Value,
|
||||||
|
_info: &ApiMethod,
|
||||||
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Value, Error> {
|
||||||
|
|
||||||
|
let env: &BackupEnvironment = rpcenv.as_ref();
|
||||||
|
|
||||||
|
let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());
|
||||||
|
|
||||||
|
Ok(json!(backup_time))
|
||||||
|
}
|
||||||
|
|
||||||
#[sortable]
|
#[sortable]
|
||||||
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
|
pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
|
||||||
&ApiHandler::AsyncHttp(&download_previous),
|
&ApiHandler::AsyncHttp(&download_previous),
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use nix::dir::Dir;
|
||||||
|
|
||||||
use ::serde::{Serialize};
|
use ::serde::{Serialize};
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
@ -9,7 +10,7 @@ use proxmox::tools::digest_to_hex;
|
|||||||
use proxmox::tools::fs::{replace_file, CreateOptions};
|
use proxmox::tools::fs::{replace_file, CreateOptions};
|
||||||
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
|
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
|
||||||
|
|
||||||
use crate::api2::types::Userid;
|
use crate::api2::types::Authid;
|
||||||
use crate::backup::*;
|
use crate::backup::*;
|
||||||
use crate::server::WorkerTask;
|
use crate::server::WorkerTask;
|
||||||
use crate::server::formatter::*;
|
use crate::server::formatter::*;
|
||||||
@ -103,7 +104,7 @@ impl SharedBackupState {
|
|||||||
pub struct BackupEnvironment {
|
pub struct BackupEnvironment {
|
||||||
env_type: RpcEnvironmentType,
|
env_type: RpcEnvironmentType,
|
||||||
result_attributes: Value,
|
result_attributes: Value,
|
||||||
user: Userid,
|
auth_id: Authid,
|
||||||
pub debug: bool,
|
pub debug: bool,
|
||||||
pub formatter: &'static OutputFormatter,
|
pub formatter: &'static OutputFormatter,
|
||||||
pub worker: Arc<WorkerTask>,
|
pub worker: Arc<WorkerTask>,
|
||||||
@ -116,7 +117,7 @@ pub struct BackupEnvironment {
|
|||||||
impl BackupEnvironment {
|
impl BackupEnvironment {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
env_type: RpcEnvironmentType,
|
env_type: RpcEnvironmentType,
|
||||||
user: Userid,
|
auth_id: Authid,
|
||||||
worker: Arc<WorkerTask>,
|
worker: Arc<WorkerTask>,
|
||||||
datastore: Arc<DataStore>,
|
datastore: Arc<DataStore>,
|
||||||
backup_dir: BackupDir,
|
backup_dir: BackupDir,
|
||||||
@ -136,7 +137,7 @@ impl BackupEnvironment {
|
|||||||
Self {
|
Self {
|
||||||
result_attributes: json!({}),
|
result_attributes: json!({}),
|
||||||
env_type,
|
env_type,
|
||||||
user,
|
auth_id,
|
||||||
worker,
|
worker,
|
||||||
datastore,
|
datastore,
|
||||||
debug: false,
|
debug: false,
|
||||||
@ -184,7 +185,9 @@ impl BackupEnvironment {
|
|||||||
|
|
||||||
if size > data.chunk_size {
|
if size > data.chunk_size {
|
||||||
bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
|
bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
|
||||||
} else if size < data.chunk_size {
|
}
|
||||||
|
|
||||||
|
if size < data.chunk_size {
|
||||||
data.small_chunk_count += 1;
|
data.small_chunk_count += 1;
|
||||||
if data.small_chunk_count > 1 {
|
if data.small_chunk_count > 1 {
|
||||||
bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)");
|
bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)");
|
||||||
@ -464,7 +467,7 @@ impl BackupEnvironment {
|
|||||||
state.ensure_unfinished()?;
|
state.ensure_unfinished()?;
|
||||||
|
|
||||||
// test if all writer are correctly closed
|
// test if all writer are correctly closed
|
||||||
if state.dynamic_writers.len() != 0 || state.fixed_writers.len() != 0 {
|
if !state.dynamic_writers.is_empty() || !state.fixed_writers.is_empty() {
|
||||||
bail!("found open index writer - unable to finish backup");
|
bail!("found open index writer - unable to finish backup");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -472,16 +475,11 @@ impl BackupEnvironment {
|
|||||||
bail!("backup does not contain valid files (file count == 0)");
|
bail!("backup does not contain valid files (file count == 0)");
|
||||||
}
|
}
|
||||||
|
|
||||||
// check manifest
|
// check for valid manifest and store stats
|
||||||
let mut manifest = self.datastore.load_manifest_json(&self.backup_dir)
|
|
||||||
.map_err(|err| format_err!("unable to load manifest blob - {}", err))?;
|
|
||||||
|
|
||||||
let stats = serde_json::to_value(state.backup_stat)?;
|
let stats = serde_json::to_value(state.backup_stat)?;
|
||||||
|
self.datastore.update_manifest(&self.backup_dir, |manifest| {
|
||||||
manifest["unprotected"]["chunk_upload_stats"] = stats;
|
manifest.unprotected["chunk_upload_stats"] = stats;
|
||||||
|
}).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
|
||||||
self.datastore.store_manifest(&self.backup_dir, manifest)
|
|
||||||
.map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
|
|
||||||
|
|
||||||
if let Some(base) = &self.last_backup {
|
if let Some(base) = &self.last_backup {
|
||||||
let path = self.datastore.snapshot_path(&base.backup_dir);
|
let path = self.datastore.snapshot_path(&base.backup_dir);
|
||||||
@ -499,6 +497,51 @@ impl BackupEnvironment {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// If verify-new is set on the datastore, this will run a new verify task
|
||||||
|
/// for the backup. If not, this will return and also drop the passed lock
|
||||||
|
/// immediately.
|
||||||
|
pub fn verify_after_complete(&self, snap_lock: Dir) -> Result<(), Error> {
|
||||||
|
self.ensure_finished()?;
|
||||||
|
|
||||||
|
if !self.datastore.verify_new() {
|
||||||
|
// no verify requested, do nothing
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let worker_id = format!("{}:{}/{}/{:08X}",
|
||||||
|
self.datastore.name(),
|
||||||
|
self.backup_dir.group().backup_type(),
|
||||||
|
self.backup_dir.group().backup_id(),
|
||||||
|
self.backup_dir.backup_time());
|
||||||
|
|
||||||
|
let datastore = self.datastore.clone();
|
||||||
|
let backup_dir = self.backup_dir.clone();
|
||||||
|
|
||||||
|
WorkerTask::new_thread(
|
||||||
|
"verify",
|
||||||
|
Some(worker_id),
|
||||||
|
self.auth_id.clone(),
|
||||||
|
false,
|
||||||
|
move |worker| {
|
||||||
|
worker.log("Automatically verifying newly added snapshot");
|
||||||
|
|
||||||
|
|
||||||
|
let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
|
||||||
|
if !verify_backup_dir_with_lock(
|
||||||
|
&verify_worker,
|
||||||
|
&backup_dir,
|
||||||
|
worker.upid().clone(),
|
||||||
|
None,
|
||||||
|
snap_lock,
|
||||||
|
)? {
|
||||||
|
bail!("verification failed - please check the log for details");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
).map(|_| ())
|
||||||
|
}
|
||||||
|
|
||||||
pub fn log<S: AsRef<str>>(&self, msg: S) {
|
pub fn log<S: AsRef<str>>(&self, msg: S) {
|
||||||
self.worker.log(msg);
|
self.worker.log(msg);
|
||||||
}
|
}
|
||||||
@ -523,6 +566,12 @@ impl BackupEnvironment {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Return true if the finished flag is set
|
||||||
|
pub fn finished(&self) -> bool {
|
||||||
|
let state = self.state.lock().unwrap();
|
||||||
|
state.finished
|
||||||
|
}
|
||||||
|
|
||||||
/// Remove complete backup
|
/// Remove complete backup
|
||||||
pub fn remove_backup(&self) -> Result<(), Error> {
|
pub fn remove_backup(&self) -> Result<(), Error> {
|
||||||
let mut state = self.state.lock().unwrap();
|
let mut state = self.state.lock().unwrap();
|
||||||
@ -548,12 +597,12 @@ impl RpcEnvironment for BackupEnvironment {
|
|||||||
self.env_type
|
self.env_type
|
||||||
}
|
}
|
||||||
|
|
||||||
fn set_user(&mut self, _user: Option<String>) {
|
fn set_auth_id(&mut self, _auth_id: Option<String>) {
|
||||||
panic!("unable to change user");
|
panic!("unable to change auth_id");
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_user(&self) -> Option<String> {
|
fn get_auth_id(&self) -> Option<String> {
|
||||||
Some(self.user.to_string())
|
Some(self.auth_id.to_string())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,14 +1,28 @@
|
|||||||
|
//! Backup Server Configuration
|
||||||
|
|
||||||
use proxmox::api::router::{Router, SubdirMap};
|
use proxmox::api::router::{Router, SubdirMap};
|
||||||
use proxmox::list_subdirs_api_method;
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
|
pub mod access;
|
||||||
pub mod datastore;
|
pub mod datastore;
|
||||||
pub mod remote;
|
pub mod remote;
|
||||||
pub mod sync;
|
pub mod sync;
|
||||||
|
pub mod verify;
|
||||||
|
pub mod drive;
|
||||||
|
pub mod changer;
|
||||||
|
pub mod media_pool;
|
||||||
|
pub mod tape_encryption_keys;
|
||||||
|
|
||||||
const SUBDIRS: SubdirMap = &[
|
const SUBDIRS: SubdirMap = &[
|
||||||
|
("access", &access::ROUTER),
|
||||||
|
("changer", &changer::ROUTER),
|
||||||
("datastore", &datastore::ROUTER),
|
("datastore", &datastore::ROUTER),
|
||||||
|
("drive", &drive::ROUTER),
|
||||||
|
("media-pool", &media_pool::ROUTER),
|
||||||
("remote", &remote::ROUTER),
|
("remote", &remote::ROUTER),
|
||||||
("sync", &sync::ROUTER),
|
("sync", &sync::ROUTER),
|
||||||
|
("tape-encryption-keys", &tape_encryption_keys::ROUTER),
|
||||||
|
("verify", &verify::ROUTER),
|
||||||
];
|
];
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
|
10
src/api2/config/access/mod.rs
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
use proxmox::api::{Router, SubdirMap};
|
||||||
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
|
pub mod tfa;
|
||||||
|
|
||||||
|
const SUBDIRS: SubdirMap = &[("tfa", &tfa::ROUTER)];
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||||
|
.subdirs(SUBDIRS);
|
84
src/api2/config/access/tfa/mod.rs
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
//! For now this only has the TFA subdir, which is in this file.
|
||||||
|
//! If we add more, it should be moved into a sub module.
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
|
||||||
|
use crate::api2::types::PROXMOX_CONFIG_DIGEST_SCHEMA;
|
||||||
|
use proxmox::api::{api, Permission, Router, RpcEnvironment, SubdirMap};
|
||||||
|
use proxmox::list_subdirs_api_method;
|
||||||
|
|
||||||
|
use crate::config::tfa::{self, WebauthnConfig, WebauthnConfigUpdater};
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&list_subdirs_api_method!(SUBDIRS))
|
||||||
|
.subdirs(SUBDIRS);
|
||||||
|
|
||||||
|
const SUBDIRS: SubdirMap = &[("webauthn", &WEBAUTHN_ROUTER)];
|
||||||
|
|
||||||
|
const WEBAUTHN_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_WEBAUTHN_CONFIG)
|
||||||
|
.put(&API_METHOD_UPDATE_WEBAUTHN_CONFIG);
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: WebauthnConfig,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Anybody,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get the TFA configuration.
|
||||||
|
pub fn get_webauthn_config(
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Option<WebauthnConfig>, Error> {
|
||||||
|
let (config, digest) = match tfa::webauthn_config()? {
|
||||||
|
Some(c) => c,
|
||||||
|
None => return Ok(None),
|
||||||
|
};
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
Ok(Some(config))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
webauthn: {
|
||||||
|
flatten: true,
|
||||||
|
type: WebauthnConfigUpdater,
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
optional: true,
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update the TFA configuration.
|
||||||
|
pub fn update_webauthn_config(
|
||||||
|
webauthn: WebauthnConfigUpdater,
|
||||||
|
digest: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let _lock = tfa::write_lock();
|
||||||
|
|
||||||
|
let mut tfa = tfa::read()?;
|
||||||
|
|
||||||
|
if let Some(wa) = &mut tfa.webauthn {
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &wa.digest()?)?;
|
||||||
|
}
|
||||||
|
webauthn.apply_to(wa);
|
||||||
|
} else {
|
||||||
|
tfa.webauthn = Some(webauthn.build()?);
|
||||||
|
}
|
||||||
|
|
||||||
|
tfa::write(&tfa)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
295
src/api2/config/changer.rs
Normal file
@ -0,0 +1,295 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::api::{
|
||||||
|
api,
|
||||||
|
Router,
|
||||||
|
RpcEnvironment,
|
||||||
|
schema::parse_property_string,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
config,
|
||||||
|
api2::types::{
|
||||||
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
CHANGER_NAME_SCHEMA,
|
||||||
|
LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
SLOT_ARRAY_SCHEMA,
|
||||||
|
EXPORT_SLOT_LIST_SCHEMA,
|
||||||
|
ScsiTapeChanger,
|
||||||
|
LinuxTapeDrive,
|
||||||
|
},
|
||||||
|
tape::{
|
||||||
|
linux_tape_changer_list,
|
||||||
|
check_drive_path,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
},
|
||||||
|
"export-slots": {
|
||||||
|
schema: EXPORT_SLOT_LIST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new changer device
|
||||||
|
pub fn create_changer(
|
||||||
|
name: String,
|
||||||
|
path: String,
|
||||||
|
export_slots: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let linux_changers = linux_tape_changer_list();
|
||||||
|
|
||||||
|
check_drive_path(&linux_changers, &path)?;
|
||||||
|
|
||||||
|
let existing: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
|
||||||
|
|
||||||
|
for changer in existing {
|
||||||
|
if changer.name == name {
|
||||||
|
bail!("Entry '{}' already exists", name);
|
||||||
|
}
|
||||||
|
|
||||||
|
if changer.path == path {
|
||||||
|
bail!("Path '{}' already in use by '{}'", path, changer.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let item = ScsiTapeChanger {
|
||||||
|
name: name.clone(),
|
||||||
|
path,
|
||||||
|
export_slots,
|
||||||
|
};
|
||||||
|
|
||||||
|
config.set_data(&name, "changer", &item)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: ScsiTapeChanger,
|
||||||
|
},
|
||||||
|
|
||||||
|
)]
|
||||||
|
/// Get tape changer configuration
|
||||||
|
pub fn get_config(
|
||||||
|
name: String,
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<ScsiTapeChanger, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "The list of configured changers (with config digest).",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: ScsiTapeChanger,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List changers
|
||||||
|
pub fn list_changers(
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<ScsiTapeChanger>, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let list: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
|
}
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Deletable property name
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete export-slots.
|
||||||
|
export_slots,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"export-slots": {
|
||||||
|
schema: EXPORT_SLOT_LIST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update a tape changer configuration
|
||||||
|
pub fn update_changer(
|
||||||
|
name: String,
|
||||||
|
path: Option<String>,
|
||||||
|
export_slots: Option<String>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
|
digest: Option<String>,
|
||||||
|
_param: Value,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut data: ScsiTapeChanger = config.lookup("changer", &name)?;
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::export_slots => {
|
||||||
|
data.export_slots = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(path) = path {
|
||||||
|
let changers = linux_tape_changer_list();
|
||||||
|
check_drive_path(&changers, &path)?;
|
||||||
|
data.path = path;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(export_slots) = export_slots {
|
||||||
|
let slots: Value = parse_property_string(
|
||||||
|
&export_slots, &SLOT_ARRAY_SCHEMA
|
||||||
|
)?;
|
||||||
|
let mut slots: Vec<String> = slots
|
||||||
|
.as_array()
|
||||||
|
.unwrap()
|
||||||
|
.iter()
|
||||||
|
.map(|v| v.to_string())
|
||||||
|
.collect();
|
||||||
|
slots.sort();
|
||||||
|
|
||||||
|
if slots.is_empty() {
|
||||||
|
data.export_slots = None;
|
||||||
|
} else {
|
||||||
|
let slots = slots.join(",");
|
||||||
|
data.export_slots = Some(slots);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&name, "changer", &data)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete a tape changer configuration
|
||||||
|
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
match config.sections.get(&name) {
|
||||||
|
Some((section_type, _)) => {
|
||||||
|
if section_type != "changer" {
|
||||||
|
bail!("Entry '{}' exists, but is not a changer device", name);
|
||||||
|
}
|
||||||
|
config.sections.remove(&name);
|
||||||
|
},
|
||||||
|
None => bail!("Delete changer '{}' failed - no such entry", name),
|
||||||
|
}
|
||||||
|
|
||||||
|
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
for drive in drive_list {
|
||||||
|
if let Some(changer) = drive.changer {
|
||||||
|
if changer == name {
|
||||||
|
bail!("Delete changer '{}' failed - used by drive '{}'", name, drive.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_CONFIG)
|
||||||
|
.put(&API_METHOD_UPDATE_CHANGER)
|
||||||
|
.delete(&API_METHOD_DELETE_CHANGER);
|
||||||
|
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_CHANGERS)
|
||||||
|
.post(&API_METHOD_CREATE_CHANGER)
|
||||||
|
.match_all("name", &ITEM_ROUTER);
|
@ -5,6 +5,7 @@ use serde_json::Value;
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
use proxmox::api::{api, Router, RpcEnvironment, Permission};
|
||||||
|
use proxmox::api::schema::parse_property_string;
|
||||||
use proxmox::tools::fs::open_file_locked;
|
use proxmox::tools::fs::open_file_locked;
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
@ -12,6 +13,7 @@ use crate::backup::*;
|
|||||||
use crate::config::cached_user_info::CachedUserInfo;
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
|
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
|
||||||
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
|
||||||
|
use crate::server::jobstate;
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
input: {
|
input: {
|
||||||
@ -34,14 +36,14 @@ pub fn list_datastores(
|
|||||||
|
|
||||||
let (config, digest) = datastore::config()?;
|
let (config, digest) = datastore::config()?;
|
||||||
|
|
||||||
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
let user_info = CachedUserInfo::new()?;
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
|
||||||
let filter_by_privs = |store: &DataStoreConfig| {
|
let filter_by_privs = |store: &DataStoreConfig| {
|
||||||
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
|
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
|
||||||
(user_privs & PRIV_DATASTORE_AUDIT) != 0
|
(user_privs & PRIV_DATASTORE_AUDIT) != 0
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -67,6 +69,14 @@ pub fn list_datastores(
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
},
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
"notify": {
|
||||||
|
optional: true,
|
||||||
|
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
|
||||||
|
},
|
||||||
"gc-schedule": {
|
"gc-schedule": {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: GC_SCHEDULE_SCHEMA,
|
schema: GC_SCHEDULE_SCHEMA,
|
||||||
@ -75,10 +85,6 @@ pub fn list_datastores(
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||||
},
|
},
|
||||||
"verify-schedule": {
|
|
||||||
optional: true,
|
|
||||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
|
||||||
},
|
|
||||||
"keep-last": {
|
"keep-last": {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||||
@ -114,11 +120,11 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
|
|||||||
|
|
||||||
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
|
let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;
|
||||||
|
|
||||||
let (mut config, _digest) = datastore::config()?;
|
let (mut config, _digest) = datastore::config()?;
|
||||||
|
|
||||||
if let Some(_) = config.sections.get(&datastore.name) {
|
if config.sections.get(&datastore.name).is_some() {
|
||||||
bail!("datastore '{}' already exists.", datastore.name);
|
bail!("datastore '{}' already exists.", datastore.name);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -131,9 +137,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
|
|||||||
|
|
||||||
datastore::save_config(&config)?;
|
datastore::save_config(&config)?;
|
||||||
|
|
||||||
crate::config::jobstate::create_state_file("prune", &datastore.name)?;
|
jobstate::create_state_file("prune", &datastore.name)?;
|
||||||
crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
|
jobstate::create_state_file("garbage_collection", &datastore.name)?;
|
||||||
crate::config::jobstate::create_state_file("verify", &datastore.name)?;
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -146,10 +151,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: { type: datastore::DataStoreConfig },
|
||||||
description: "The datastore configuration (with config digest).",
|
|
||||||
type: datastore::DataStoreConfig,
|
|
||||||
},
|
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
|
permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_AUDIT, false),
|
||||||
},
|
},
|
||||||
@ -179,8 +181,6 @@ pub enum DeletableProperty {
|
|||||||
gc_schedule,
|
gc_schedule,
|
||||||
/// Delete the prune job schedule.
|
/// Delete the prune job schedule.
|
||||||
prune_schedule,
|
prune_schedule,
|
||||||
/// Delete the verify schedule property
|
|
||||||
verify_schedule,
|
|
||||||
/// Delete the keep-last property
|
/// Delete the keep-last property
|
||||||
keep_last,
|
keep_last,
|
||||||
/// Delete the keep-hourly property
|
/// Delete the keep-hourly property
|
||||||
@ -193,6 +193,12 @@ pub enum DeletableProperty {
|
|||||||
keep_monthly,
|
keep_monthly,
|
||||||
/// Delete the keep-yearly property
|
/// Delete the keep-yearly property
|
||||||
keep_yearly,
|
keep_yearly,
|
||||||
|
/// Delete the verify-new property
|
||||||
|
verify_new,
|
||||||
|
/// Delete the notify-user property
|
||||||
|
notify_user,
|
||||||
|
/// Delete the notify property
|
||||||
|
notify,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
@ -206,6 +212,14 @@ pub enum DeletableProperty {
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
},
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
"notify": {
|
||||||
|
optional: true,
|
||||||
|
schema: DATASTORE_NOTIFY_STRING_SCHEMA,
|
||||||
|
},
|
||||||
"gc-schedule": {
|
"gc-schedule": {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: GC_SCHEDULE_SCHEMA,
|
schema: GC_SCHEDULE_SCHEMA,
|
||||||
@ -214,10 +228,6 @@ pub enum DeletableProperty {
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||||
},
|
},
|
||||||
"verify-schedule": {
|
|
||||||
optional: true,
|
|
||||||
schema: VERIFY_SCHEDULE_SCHEMA,
|
|
||||||
},
|
|
||||||
"keep-last": {
|
"keep-last": {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: PRUNE_SCHEMA_KEEP_LAST,
|
schema: PRUNE_SCHEMA_KEEP_LAST,
|
||||||
@ -242,6 +252,12 @@ pub enum DeletableProperty {
|
|||||||
optional: true,
|
optional: true,
|
||||||
schema: PRUNE_SCHEMA_KEEP_YEARLY,
|
schema: PRUNE_SCHEMA_KEEP_YEARLY,
|
||||||
},
|
},
|
||||||
|
"verify-new": {
|
||||||
|
description: "If enabled, all new backups will be verified right after completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
delete: {
|
delete: {
|
||||||
description: "List of properties to delete.",
|
description: "List of properties to delete.",
|
||||||
type: Array,
|
type: Array,
|
||||||
@ -261,18 +277,21 @@ pub enum DeletableProperty {
|
|||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Update datastore config.
|
/// Update datastore config.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn update_datastore(
|
pub fn update_datastore(
|
||||||
name: String,
|
name: String,
|
||||||
comment: Option<String>,
|
comment: Option<String>,
|
||||||
gc_schedule: Option<String>,
|
gc_schedule: Option<String>,
|
||||||
prune_schedule: Option<String>,
|
prune_schedule: Option<String>,
|
||||||
verify_schedule: Option<String>,
|
|
||||||
keep_last: Option<u64>,
|
keep_last: Option<u64>,
|
||||||
keep_hourly: Option<u64>,
|
keep_hourly: Option<u64>,
|
||||||
keep_daily: Option<u64>,
|
keep_daily: Option<u64>,
|
||||||
keep_weekly: Option<u64>,
|
keep_weekly: Option<u64>,
|
||||||
keep_monthly: Option<u64>,
|
keep_monthly: Option<u64>,
|
||||||
keep_yearly: Option<u64>,
|
keep_yearly: Option<u64>,
|
||||||
|
verify_new: Option<bool>,
|
||||||
|
notify: Option<String>,
|
||||||
|
notify_user: Option<Userid>,
|
||||||
delete: Option<Vec<DeletableProperty>>,
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
digest: Option<String>,
|
digest: Option<String>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -295,13 +314,15 @@ pub fn update_datastore(
|
|||||||
DeletableProperty::comment => { data.comment = None; },
|
DeletableProperty::comment => { data.comment = None; },
|
||||||
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
|
DeletableProperty::gc_schedule => { data.gc_schedule = None; },
|
||||||
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
|
DeletableProperty::prune_schedule => { data.prune_schedule = None; },
|
||||||
DeletableProperty::verify_schedule => { data.verify_schedule = None; },
|
|
||||||
DeletableProperty::keep_last => { data.keep_last = None; },
|
DeletableProperty::keep_last => { data.keep_last = None; },
|
||||||
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
|
DeletableProperty::keep_hourly => { data.keep_hourly = None; },
|
||||||
DeletableProperty::keep_daily => { data.keep_daily = None; },
|
DeletableProperty::keep_daily => { data.keep_daily = None; },
|
||||||
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
|
DeletableProperty::keep_weekly => { data.keep_weekly = None; },
|
||||||
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
|
DeletableProperty::keep_monthly => { data.keep_monthly = None; },
|
||||||
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
|
DeletableProperty::keep_yearly => { data.keep_yearly = None; },
|
||||||
|
DeletableProperty::verify_new => { data.verify_new = None; },
|
||||||
|
DeletableProperty::notify => { data.notify = None; },
|
||||||
|
DeletableProperty::notify_user => { data.notify_user = None; },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -327,12 +348,6 @@ pub fn update_datastore(
|
|||||||
data.prune_schedule = prune_schedule;
|
data.prune_schedule = prune_schedule;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut verify_schedule_changed = false;
|
|
||||||
if verify_schedule.is_some() {
|
|
||||||
verify_schedule_changed = data.verify_schedule != verify_schedule;
|
|
||||||
data.verify_schedule = verify_schedule;
|
|
||||||
}
|
|
||||||
|
|
||||||
if keep_last.is_some() { data.keep_last = keep_last; }
|
if keep_last.is_some() { data.keep_last = keep_last; }
|
||||||
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
|
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
|
||||||
if keep_daily.is_some() { data.keep_daily = keep_daily; }
|
if keep_daily.is_some() { data.keep_daily = keep_daily; }
|
||||||
@ -340,6 +355,19 @@ pub fn update_datastore(
|
|||||||
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
|
if keep_monthly.is_some() { data.keep_monthly = keep_monthly; }
|
||||||
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
|
if keep_yearly.is_some() { data.keep_yearly = keep_yearly; }
|
||||||
|
|
||||||
|
if let Some(notify_str) = notify {
|
||||||
|
let value = parse_property_string(¬ify_str, &DatastoreNotify::API_SCHEMA)?;
|
||||||
|
let notify: DatastoreNotify = serde_json::from_value(value)?;
|
||||||
|
if let DatastoreNotify { gc: None, verify: None, sync: None } = notify {
|
||||||
|
data.notify = None;
|
||||||
|
} else {
|
||||||
|
data.notify = Some(notify_str);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if verify_new.is_some() { data.verify_new = verify_new; }
|
||||||
|
|
||||||
|
if notify_user.is_some() { data.notify_user = notify_user; }
|
||||||
|
|
||||||
config.set_data(&name, "datastore", &data)?;
|
config.set_data(&name, "datastore", &data)?;
|
||||||
|
|
||||||
datastore::save_config(&config)?;
|
datastore::save_config(&config)?;
|
||||||
@ -347,15 +375,11 @@ pub fn update_datastore(
|
|||||||
// we want to reset the statefiles, to avoid an immediate action in some cases
|
// we want to reset the statefiles, to avoid an immediate action in some cases
|
||||||
// (e.g. going from monthly to weekly in the second week of the month)
|
// (e.g. going from monthly to weekly in the second week of the month)
|
||||||
if gc_schedule_changed {
|
if gc_schedule_changed {
|
||||||
crate::config::jobstate::create_state_file("garbage_collection", &name)?;
|
jobstate::create_state_file("garbage_collection", &name)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if prune_schedule_changed {
|
if prune_schedule_changed {
|
||||||
crate::config::jobstate::create_state_file("prune", &name)?;
|
jobstate::create_state_file("prune", &name)?;
|
||||||
}
|
|
||||||
|
|
||||||
if verify_schedule_changed {
|
|
||||||
crate::config::jobstate::create_state_file("verify", &name)?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -398,9 +422,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
|
|||||||
datastore::save_config(&config)?;
|
datastore::save_config(&config)?;
|
||||||
|
|
||||||
// ignore errors
|
// ignore errors
|
||||||
let _ = crate::config::jobstate::remove_state_file("prune", &name);
|
let _ = jobstate::remove_state_file("prune", &name);
|
||||||
let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
|
let _ = jobstate::remove_state_file("garbage_collection", &name);
|
||||||
let _ = crate::config::jobstate::remove_state_file("verify", &name);
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
281
src/api2/config/drive.rs
Normal file
@ -0,0 +1,281 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use proxmox::api::{api, Router, RpcEnvironment};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
config,
|
||||||
|
api2::types::{
|
||||||
|
PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
DRIVE_NAME_SCHEMA,
|
||||||
|
CHANGER_NAME_SCHEMA,
|
||||||
|
CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
LinuxTapeDrive,
|
||||||
|
ScsiTapeChanger,
|
||||||
|
},
|
||||||
|
tape::{
|
||||||
|
linux_tape_device_list,
|
||||||
|
check_drive_path,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
},
|
||||||
|
changer: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"changer-drivenum": {
|
||||||
|
schema: CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new drive
|
||||||
|
pub fn create_drive(param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let item: LinuxTapeDrive = serde_json::from_value(param)?;
|
||||||
|
|
||||||
|
let linux_drives = linux_tape_device_list();
|
||||||
|
|
||||||
|
check_drive_path(&linux_drives, &item.path)?;
|
||||||
|
|
||||||
|
let existing: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
|
||||||
|
for drive in existing {
|
||||||
|
if drive.name == item.name {
|
||||||
|
bail!("Entry '{}' already exists", item.name);
|
||||||
|
}
|
||||||
|
if drive.path == item.path {
|
||||||
|
bail!("Path '{}' already used in drive '{}'", item.path, drive.name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&item.name, "linux", &item)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: LinuxTapeDrive,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get drive configuration
|
||||||
|
pub fn get_config(
|
||||||
|
name: String,
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<LinuxTapeDrive, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let data: LinuxTapeDrive = config.lookup("linux", &name)?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "The list of configured drives (with config digest).",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: LinuxTapeDrive,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List drives
|
||||||
|
pub fn list_drives(
|
||||||
|
_param: Value,
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<LinuxTapeDrive>, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
let drive_list: Vec<LinuxTapeDrive> = config.convert_to_typed_array("linux")?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(drive_list)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Deletable property name
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete the changer property.
|
||||||
|
changer,
|
||||||
|
/// Delete the changer-drivenum property.
|
||||||
|
changer_drivenum,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
path: {
|
||||||
|
schema: LINUX_DRIVE_PATH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
changer: {
|
||||||
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"changer-drivenum": {
|
||||||
|
schema: CHANGER_DRIVENUM_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
digest: {
|
||||||
|
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update a drive configuration
|
||||||
|
pub fn update_drive(
|
||||||
|
name: String,
|
||||||
|
path: Option<String>,
|
||||||
|
changer: Option<String>,
|
||||||
|
changer_drivenum: Option<u64>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
|
digest: Option<String>,
|
||||||
|
_param: Value,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, expected_digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
if let Some(ref digest) = digest {
|
||||||
|
let digest = proxmox::tools::hex_to_digest(digest)?;
|
||||||
|
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut data: LinuxTapeDrive = config.lookup("linux", &name)?;
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::changer => {
|
||||||
|
data.changer = None;
|
||||||
|
data.changer_drivenum = None;
|
||||||
|
},
|
||||||
|
DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(path) = path {
|
||||||
|
let linux_drives = linux_tape_device_list();
|
||||||
|
check_drive_path(&linux_drives, &path)?;
|
||||||
|
data.path = path;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(changer) = changer {
|
||||||
|
let _: ScsiTapeChanger = config.lookup("changer", &changer)?;
|
||||||
|
data.changer = Some(changer);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(changer_drivenum) = changer_drivenum {
|
||||||
|
if changer_drivenum == 0 {
|
||||||
|
data.changer_drivenum = None;
|
||||||
|
} else {
|
||||||
|
if data.changer.is_none() {
|
||||||
|
bail!("Option 'changer-drivenum' requires option 'changer'.");
|
||||||
|
}
|
||||||
|
data.changer_drivenum = Some(changer_drivenum);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config.set_data(&name, "linux", &data)?;
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete a drive configuration
|
||||||
|
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::drive::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::drive::config()?;
|
||||||
|
|
||||||
|
match config.sections.get(&name) {
|
||||||
|
Some((section_type, _)) => {
|
||||||
|
if section_type != "linux" {
|
||||||
|
bail!("Entry '{}' exists, but is not a linux tape drive", name);
|
||||||
|
}
|
||||||
|
config.sections.remove(&name);
|
||||||
|
},
|
||||||
|
None => bail!("Delete drive '{}' failed - no such drive", name),
|
||||||
|
}
|
||||||
|
|
||||||
|
config::drive::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_CONFIG)
|
||||||
|
.put(&API_METHOD_UPDATE_DRIVE)
|
||||||
|
.delete(&API_METHOD_DELETE_DRIVE);
|
||||||
|
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_DRIVES)
|
||||||
|
.post(&API_METHOD_CREATE_DRIVE)
|
||||||
|
.match_all("name", &ITEM_ROUTER);
|
251
src/api2/config/media_pool.rs
Normal file
@ -0,0 +1,251 @@
|
|||||||
|
use anyhow::{bail, Error};
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::{
|
||||||
|
api::{
|
||||||
|
api,
|
||||||
|
Router,
|
||||||
|
RpcEnvironment,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
api2::types::{
|
||||||
|
MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||||
|
MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||||
|
MEDIA_RETENTION_POLICY_SCHEMA,
|
||||||
|
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
|
MediaPoolConfig,
|
||||||
|
},
|
||||||
|
config,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
allocation: {
|
||||||
|
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
retention: {
|
||||||
|
schema: MEDIA_RETENTION_POLICY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
template: {
|
||||||
|
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
encrypt: {
|
||||||
|
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Create a new media pool
|
||||||
|
pub fn create_pool(
|
||||||
|
name: String,
|
||||||
|
allocation: Option<String>,
|
||||||
|
retention: Option<String>,
|
||||||
|
template: Option<String>,
|
||||||
|
encrypt: Option<String>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::media_pool::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
|
if config.sections.get(&name).is_some() {
|
||||||
|
bail!("Media pool '{}' already exists", name);
|
||||||
|
}
|
||||||
|
|
||||||
|
let item = MediaPoolConfig {
|
||||||
|
name: name.clone(),
|
||||||
|
allocation,
|
||||||
|
retention,
|
||||||
|
template,
|
||||||
|
encrypt,
|
||||||
|
};
|
||||||
|
|
||||||
|
config.set_data(&name, "pool", &item)?;
|
||||||
|
|
||||||
|
config::media_pool::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
returns: {
|
||||||
|
description: "The list of configured media pools (with config digest).",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
type: MediaPoolConfig,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List media pools
|
||||||
|
pub fn list_pools(
|
||||||
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
|
) -> Result<Vec<MediaPoolConfig>, Error> {
|
||||||
|
|
||||||
|
let (config, digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
|
let list = config.convert_to_typed_array("pool")?;
|
||||||
|
|
||||||
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
|
|
||||||
|
Ok(list)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
type: MediaPoolConfig,
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Get media pool configuration
|
||||||
|
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
|
||||||
|
|
||||||
|
let (config, _digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
|
let data: MediaPoolConfig = config.lookup("pool", &name)?;
|
||||||
|
|
||||||
|
Ok(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
/// Deletable property name
|
||||||
|
pub enum DeletableProperty {
|
||||||
|
/// Delete media set allocation policy.
|
||||||
|
allocation,
|
||||||
|
/// Delete pool retention policy
|
||||||
|
retention,
|
||||||
|
/// Delete media set naming template
|
||||||
|
template,
|
||||||
|
/// Delete encryption fingerprint
|
||||||
|
encrypt,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
allocation: {
|
||||||
|
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
retention: {
|
||||||
|
schema: MEDIA_RETENTION_POLICY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
template: {
|
||||||
|
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
encrypt: {
|
||||||
|
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
delete: {
|
||||||
|
description: "List of properties to delete.",
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
items: {
|
||||||
|
type: DeletableProperty,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Update media pool settings
|
||||||
|
pub fn update_pool(
|
||||||
|
name: String,
|
||||||
|
allocation: Option<String>,
|
||||||
|
retention: Option<String>,
|
||||||
|
template: Option<String>,
|
||||||
|
encrypt: Option<String>,
|
||||||
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::media_pool::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
|
let mut data: MediaPoolConfig = config.lookup("pool", &name)?;
|
||||||
|
|
||||||
|
if let Some(delete) = delete {
|
||||||
|
for delete_prop in delete {
|
||||||
|
match delete_prop {
|
||||||
|
DeletableProperty::allocation => { data.allocation = None; },
|
||||||
|
DeletableProperty::retention => { data.retention = None; },
|
||||||
|
DeletableProperty::template => { data.template = None; },
|
||||||
|
DeletableProperty::encrypt => { data.encrypt = None; },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if allocation.is_some() { data.allocation = allocation; }
|
||||||
|
if retention.is_some() { data.retention = retention; }
|
||||||
|
if template.is_some() { data.template = template; }
|
||||||
|
if encrypt.is_some() { data.encrypt = encrypt; }
|
||||||
|
|
||||||
|
config.set_data(&name, "pool", &data)?;
|
||||||
|
|
||||||
|
config::media_pool::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
protected: true,
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// Delete a media pool configuration
|
||||||
|
pub fn delete_pool(name: String) -> Result<(), Error> {
|
||||||
|
|
||||||
|
let _lock = config::media_pool::lock()?;
|
||||||
|
|
||||||
|
let (mut config, _digest) = config::media_pool::config()?;
|
||||||
|
|
||||||
|
match config.sections.get(&name) {
|
||||||
|
Some(_) => { config.sections.remove(&name); },
|
||||||
|
None => bail!("delete pool '{}' failed - no such pool", name),
|
||||||
|
}
|
||||||
|
|
||||||
|
config::media_pool::save_config(&config)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_GET_CONFIG)
|
||||||
|
.put(&API_METHOD_UPDATE_POOL)
|
||||||
|
.delete(&API_METHOD_DELETE_POOL);
|
||||||
|
|
||||||
|
|
||||||
|
pub const ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_LIST_POOLS)
|
||||||
|
.post(&API_METHOD_CREATE_POOL)
|
||||||
|
.match_all("name", &ITEM_ROUTER);
|
@ -1,12 +1,14 @@
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
use base64;
|
|
||||||
|
|
||||||
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment, Permission};
|
||||||
|
use proxmox::http_err;
|
||||||
use proxmox::tools::fs::open_file_locked;
|
use proxmox::tools::fs::open_file_locked;
|
||||||
|
|
||||||
use crate::api2::types::*;
|
use crate::api2::types::*;
|
||||||
|
use crate::client::{HttpClient, HttpClientOptions};
|
||||||
|
use crate::config::cached_user_info::CachedUserInfo;
|
||||||
use crate::config::remote;
|
use crate::config::remote;
|
||||||
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
|
use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
|
||||||
|
|
||||||
@ -17,13 +19,11 @@ use crate::config::acl::{PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY};
|
|||||||
returns: {
|
returns: {
|
||||||
description: "The list of configured remotes (with config digest).",
|
description: "The list of configured remotes (with config digest).",
|
||||||
type: Array,
|
type: Array,
|
||||||
items: {
|
items: { type: remote::Remote },
|
||||||
type: remote::Remote,
|
|
||||||
description: "Remote configuration (without password).",
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Privilege(&["remote"], PRIV_REMOTE_AUDIT, false),
|
description: "List configured remotes filtered by Remote.Audit privileges",
|
||||||
|
permission: &Permission::Anybody,
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// List all remotes
|
/// List all remotes
|
||||||
@ -32,16 +32,25 @@ pub fn list_remotes(
|
|||||||
_info: &ApiMethod,
|
_info: &ApiMethod,
|
||||||
mut rpcenv: &mut dyn RpcEnvironment,
|
mut rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<Vec<remote::Remote>, Error> {
|
) -> Result<Vec<remote::Remote>, Error> {
|
||||||
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||||
|
let user_info = CachedUserInfo::new()?;
|
||||||
|
|
||||||
let (config, digest) = remote::config()?;
|
let (config, digest) = remote::config()?;
|
||||||
|
|
||||||
let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;
|
let mut list: Vec<remote::Remote> = config.convert_to_typed_array("remote")?;
|
||||||
|
|
||||||
// don't return password in api
|
// don't return password in api
|
||||||
for remote in &mut list {
|
for remote in &mut list {
|
||||||
remote.password = "".to_string();
|
remote.password = "".to_string();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let list = list
|
||||||
|
.into_iter()
|
||||||
|
.filter(|remote| {
|
||||||
|
let privs = user_info.lookup_privs(&auth_id, &["remote", &remote.name]);
|
||||||
|
privs & PRIV_REMOTE_AUDIT != 0
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
||||||
Ok(list)
|
Ok(list)
|
||||||
}
|
}
|
||||||
@ -66,8 +75,8 @@ pub fn list_remotes(
|
|||||||
optional: true,
|
optional: true,
|
||||||
default: 8007,
|
default: 8007,
|
||||||
},
|
},
|
||||||
userid: {
|
"auth-id": {
|
||||||
type: Userid,
|
type: Authid,
|
||||||
},
|
},
|
||||||
password: {
|
password: {
|
||||||
schema: remote::REMOTE_PASSWORD_SCHEMA,
|
schema: remote::REMOTE_PASSWORD_SCHEMA,
|
||||||
@ -87,13 +96,13 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
|
|||||||
|
|
||||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
let mut data = param.clone();
|
let mut data = param;
|
||||||
data["password"] = Value::from(base64::encode(password.as_bytes()));
|
data["password"] = Value::from(base64::encode(password.as_bytes()));
|
||||||
let remote: remote::Remote = serde_json::from_value(data)?;
|
let remote: remote::Remote = serde_json::from_value(data)?;
|
||||||
|
|
||||||
let (mut config, _digest) = remote::config()?;
|
let (mut config, _digest) = remote::config()?;
|
||||||
|
|
||||||
if let Some(_) = config.sections.get(&remote.name) {
|
if config.sections.get(&remote.name).is_some() {
|
||||||
bail!("remote '{}' already exists.", remote.name);
|
bail!("remote '{}' already exists.", remote.name);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -112,10 +121,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: { type: remote::Remote },
|
||||||
description: "The remote configuration (with config digest).",
|
|
||||||
type: remote::Remote,
|
|
||||||
},
|
|
||||||
access: {
|
access: {
|
||||||
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
|
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
|
||||||
}
|
}
|
||||||
@ -166,9 +172,9 @@ pub enum DeletableProperty {
|
|||||||
type: u16,
|
type: u16,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
userid: {
|
"auth-id": {
|
||||||
optional: true,
|
optional: true,
|
||||||
type: Userid,
|
type: Authid,
|
||||||
},
|
},
|
||||||
password: {
|
password: {
|
||||||
optional: true,
|
optional: true,
|
||||||
@ -197,12 +203,13 @@ pub enum DeletableProperty {
|
|||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
/// Update remote configuration.
|
/// Update remote configuration.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn update_remote(
|
pub fn update_remote(
|
||||||
name: String,
|
name: String,
|
||||||
comment: Option<String>,
|
comment: Option<String>,
|
||||||
host: Option<String>,
|
host: Option<String>,
|
||||||
port: Option<u16>,
|
port: Option<u16>,
|
||||||
userid: Option<Userid>,
|
auth_id: Option<Authid>,
|
||||||
password: Option<String>,
|
password: Option<String>,
|
||||||
fingerprint: Option<String>,
|
fingerprint: Option<String>,
|
||||||
delete: Option<Vec<DeletableProperty>>,
|
delete: Option<Vec<DeletableProperty>>,
|
||||||
@ -240,7 +247,7 @@ pub fn update_remote(
|
|||||||
}
|
}
|
||||||
if let Some(host) = host { data.host = host; }
|
if let Some(host) = host { data.host = host; }
|
||||||
if port.is_some() { data.port = port; }
|
if port.is_some() { data.port = port; }
|
||||||
if let Some(userid) = userid { data.userid = userid; }
|
if let Some(auth_id) = auth_id { data.auth_id = auth_id; }
|
||||||
if let Some(password) = password { data.password = password; }
|
if let Some(password) = password { data.password = password; }
|
||||||
|
|
||||||
if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }
|
if let Some(fingerprint) = fingerprint { data.fingerprint = Some(fingerprint); }
|
||||||
@ -272,6 +279,17 @@ pub fn update_remote(
|
|||||||
/// Remove a remote from the configuration file.
|
/// Remove a remote from the configuration file.
|
||||||
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
|
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
|
||||||
|
|
||||||
|
use crate::config::sync::{self, SyncJobConfig};
|
||||||
|
|
||||||
|
let (sync_jobs, _) = sync::config()?;
|
||||||
|
|
||||||
|
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
|
||||||
|
for job in job_list {
|
||||||
|
if job.remote == name {
|
||||||
|
bail!("remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
|
||||||
|
|
||||||
let (mut config, expected_digest) = remote::config()?;
|
let (mut config, expected_digest) = remote::config()?;
|
||||||
@ -291,10 +309,78 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Helper to get client for remote.cfg entry
|
||||||
|
pub async fn remote_client(remote: remote::Remote) -> Result<HttpClient, Error> {
|
||||||
|
let options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.fingerprint.clone());
|
||||||
|
|
||||||
|
let client = HttpClient::new(
|
||||||
|
&remote.host,
|
||||||
|
remote.port.unwrap_or(8007),
|
||||||
|
&remote.auth_id,
|
||||||
|
options)?;
|
||||||
|
let _auth_info = client.login() // make sure we can auth
|
||||||
|
.await
|
||||||
|
.map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.host, err))?;
|
||||||
|
|
||||||
|
Ok(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
input: {
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: REMOTE_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
access: {
|
||||||
|
permission: &Permission::Privilege(&["remote", "{name}"], PRIV_REMOTE_AUDIT, false),
|
||||||
|
},
|
||||||
|
returns: {
|
||||||
|
description: "List the accessible datastores.",
|
||||||
|
type: Array,
|
||||||
|
items: { type: DataStoreListItem },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
/// List datastores of a remote.cfg entry
|
||||||
|
pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListItem>, Error> {
|
||||||
|
let (remote_config, _digest) = remote::config()?;
|
||||||
|
let remote: remote::Remote = remote_config.lookup("remote", &name)?;
|
||||||
|
|
||||||
|
let map_remote_err = |api_err| {
|
||||||
|
http_err!(INTERNAL_SERVER_ERROR,
|
||||||
|
"failed to scan remote '{}' - {}",
|
||||||
|
&name,
|
||||||
|
api_err)
|
||||||
|
};
|
||||||
|
|
||||||
|
let client = remote_client(remote)
|
||||||
|
.await
|
||||||
|
.map_err(map_remote_err)?;
|
||||||
|
let api_res = client
|
||||||
|
.get("api2/json/admin/datastore", None)
|
||||||
|
.await
|
||||||
|
.map_err(map_remote_err)?;
|
||||||
|
let parse_res = match api_res.get("data") {
|
||||||
|
Some(data) => serde_json::from_value::<Vec<DataStoreListItem>>(data.to_owned()),
|
||||||
|
None => bail!("remote {} did not return any datastore list data", &name),
|
||||||
|
};
|
||||||
|
|
||||||
|
match parse_res {
|
||||||
|
Ok(parsed) => Ok(parsed),
|
||||||
|
Err(_) => bail!("Failed to parse remote scan api result."),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const SCAN_ROUTER: Router = Router::new()
|
||||||
|
.get(&API_METHOD_SCAN_REMOTE_DATASTORES);
|
||||||
|
|
||||||
const ITEM_ROUTER: Router = Router::new()
|
const ITEM_ROUTER: Router = Router::new()
|
||||||
.get(&API_METHOD_READ_REMOTE)
|
.get(&API_METHOD_READ_REMOTE)
|
||||||
.put(&API_METHOD_UPDATE_REMOTE)
|
.put(&API_METHOD_UPDATE_REMOTE)
|
||||||
.delete(&API_METHOD_DELETE_REMOTE);
|
.delete(&API_METHOD_DELETE_REMOTE)
|
||||||
|
.subdirs(&[("scan", &SCAN_ROUTER)]);
|
||||||
|
|
||||||
pub const ROUTER: Router = Router::new()
|
pub const ROUTER: Router = Router::new()
|
||||||
.get(&API_METHOD_LIST_REMOTES)
|
.get(&API_METHOD_LIST_REMOTES)
|
||||||
|