Compare commits
844 Commits
Author | SHA1 | Date | |
---|---|---|---|
497a7b3f8e | |||
71549afa3f | |||
a294588409 | |||
5a83930667 | |||
c25ea25f0a | |||
f7885eb263 | |||
a48d534d39 | |||
bfa942c0cf | |||
f54634a890 | |||
efb7c5348c | |||
d6fcc1170a | |||
3f742f952a | |||
84af82e8cf | |||
48109c5354 | |||
fd18775ac1 | |||
e678a50ea1 | |||
6523588c8d | |||
6fbf0acc76 | |||
36b7085ec2 | |||
1b1a553741 | |||
98b7d58b94 | |||
7fa9a37c7c | |||
f533d16ef6 | |||
778c7d954b | |||
605fe2e7e7 | |||
1b552c109d | |||
d4d49f7325 | |||
8bca935f08 | |||
fd6d243843 | |||
037f6b6d5e | |||
8eef31724f | |||
2de1b06a06 | |||
a332040a7f | |||
957133077f | |||
36c6e7bb82 | |||
ccc3896ff3 | |||
cef5c72682 | |||
51a2d9e375 | |||
048b43af24 | |||
bfd2b47649 | |||
67a5cf4714 | |||
6227654ad8 | |||
e384f16a19 | |||
89725197c0 | |||
e7d4be9d85 | |||
ba3d7e19fb | |||
b65dfff574 | |||
8cc3760e74 | |||
1cb08a0a05 | |||
6f4228809e | |||
5af3bcf062 | |||
67d00d5c0e | |||
cdc83c4eb2 | |||
ffa403b5fd | |||
5bd77f00e2 | |||
802189f7f5 | |||
a4e5a0fc9f | |||
58bfa3b19c | |||
f9c0a94140 | |||
e3619d4101 | |||
5839c469c1 | |||
bbdda58b35 | |||
ed2080762c | |||
45d5d873ce | |||
f46806414a | |||
ebf34e7edd | |||
aad2d162ab | |||
68149b9045 | |||
1ce8e905ea | |||
ccb3b45e18 | |||
6afdda8832 | |||
2121174827 | |||
df12c9ec4e | |||
4c1b776168 | |||
42dad3abd3 | |||
6c76aa434d | |||
e5f9b7f79e | |||
dd2162f6bd | |||
cabdabba3d | |||
3e593a2459 | |||
7c5287bb95 | |||
7c72ae04f1 | |||
86582454e8 | |||
013b1e8bca | |||
40ff84b138 | |||
b2065dc7d2 | |||
97dfc62f0d | |||
e351ac786d | |||
7b570c177d | |||
6838b75904 | |||
dbda1513c5 | |||
c62a6acb2e | |||
e4a5c072b4 | |||
80f950c05d | |||
4933b853cd | |||
aec1b91eb8 | |||
2e2d64fdba | |||
a37c8d2431 | |||
a8a20e9210 | |||
be5b468975 | |||
9789461363 | |||
9f58e312d7 | |||
cffe0b81e3 | |||
bb14ed8cab | |||
023adb5945 | |||
e5545c9804 | |||
efe96ec039 | |||
1d3ae83359 | |||
4bb3876352 | |||
400e90cfbe | |||
e16c289f50 | |||
140c159b36 | |||
8be69a8453 | |||
9ba4833f3c | |||
0b12a5a698 | |||
2eac359430 | |||
855b55dc14 | |||
5ad40a3dd1 | |||
7116a2d9da | |||
0d5e990a62 | |||
4f57f4ad84 | |||
13e13d836f | |||
3ab2432ab6 | |||
76e8565076 | |||
a5f30a562b | |||
a2ef36d445 | |||
9a1ecae0b7 | |||
42b010174e | |||
68e77657e6 | |||
1b2f851e42 | |||
cc99866ea3 | |||
1ea3f23f7e | |||
3f780ddf73 | |||
9edf96e6b6 | |||
73e1ba65ca | |||
02631056b8 | |||
131d0f10c2 | |||
f9aa980c7d | |||
ad0364c558 | |||
76486eb3d1 | |||
65ab4ca976 | |||
99a73fad15 | |||
16a01c19dd | |||
86b8ba448c | |||
9b8e8012a7 | |||
b29292a87b | |||
c1feb447e8 | |||
62a0e190cb | |||
5890143920 | |||
ef4df211ab | |||
eb5e0ae65a | |||
bbc71e3b02 | |||
ac81ed17b9 | |||
89145cde34 | |||
ef4b2c2470 | |||
7190cbf2ac | |||
f726e1e0ea | |||
6d81e65986 | |||
ba5f5083c3 | |||
314db4072c | |||
baff2324f3 | |||
02eae829f7 | |||
bb77143108 | |||
02cb5b5f80 | |||
a301c362e3 | |||
7526d86419 | |||
a00888e93f | |||
fc5870be53 | |||
3c8c2827cb | |||
6c221244df | |||
38629c3961 | |||
513d019ac3 | |||
3fa1b4b48c | |||
a6eac535e4 | |||
58a3fae773 | |||
0889806a3c | |||
51ec8a3c62 | |||
a12b1be728 | |||
4d04cd9ab9 | |||
a3399f4337 | |||
2b7f8dd5ea | |||
72fbe9ffa5 | |||
0be8bce718 | |||
4805edc4ec | |||
9eb784076c | |||
b9c5cd8291 | |||
9008c0c177 | |||
f027c2146e | |||
afbf2e10f3 | |||
9805207aa5 | |||
8e0b852f24 | |||
0052dc6d28 | |||
61f05679d2 | |||
9751ef4b36 | |||
0a240aaa9a | |||
e0665a64bd | |||
dc46aa9a00 | |||
ced694589d | |||
6c053ffc89 | |||
9f5b57a348 | |||
f1c4b8df34 | |||
269e274bb5 | |||
bfd357c5a1 | |||
9517a5759a | |||
a5d51b0c4f | |||
d9822cd3cb | |||
66501529a2 | |||
2072dede4a | |||
31c94d1645 | |||
9ee4c23833 | |||
a14a1c7b90 | |||
9ef88578af | |||
c4c4b5a3ef | |||
0ed40b19c7 | |||
a0cd0f9cec | |||
49e47c491b | |||
424d2d68d3 | |||
415690a0e7 | |||
2c0abe9234 | |||
2649c89358 | |||
bbd34d70d5 | |||
9779ad0b00 | |||
70fd0652a1 | |||
6b85671dd2 | |||
82bdf6b5e7 | |||
ba2679c9d7 | |||
8866cbccc8 | |||
b3477d286f | |||
68e2ea99ba | |||
d6688884f6 | |||
7d3482f5bf | |||
7a39b41c20 | |||
4672273fe6 | |||
01284de0b2 | |||
b20368ee1b | |||
e584593cb5 | |||
069a6e28a7 | |||
8fab19da73 | |||
991be99c37 | |||
1900d7810c | |||
6b5013edb3 | |||
f313494d48 | |||
353dcf1d13 | |||
3006d70ebe | |||
681e096448 | |||
ac9a9e8002 | |||
ecbc385b7b | |||
5117cf4f17 | |||
da7ec1d2af | |||
934de1d691 | |||
0c27d880b0 | |||
be3a0295b6 | |||
aa2838c27a | |||
ea584a7510 | |||
ba0ccc5991 | |||
75f83c6a81 | |||
0dda5a6695 | |||
289738dc1a | |||
d830804f02 | |||
82cc4b56e5 | |||
923f94a4d7 | |||
bbff317aa7 | |||
20429238e0 | |||
364299740f | |||
b81818b6ad | |||
2f02e431b0 | |||
e64f38cb6b | |||
ae24382634 | |||
82cae19d19 | |||
3f5fbc5620 | |||
000e6cad5c | |||
49f44cedbf | |||
eb1c59cc2a | |||
c7d032fc17 | |||
73b77d4787 | |||
67466ce564 | |||
4e0faf5ef3 | |||
c23192d34e | |||
83771aa037 | |||
95f9d67ce9 | |||
314d360fcd | |||
f8a74456cc | |||
4906bac10f | |||
86c831a5c3 | |||
a5951b4f38 | |||
f75292bd8d | |||
bfff4eaa7f | |||
067dc06dba | |||
18cdf20afc | |||
e57841c442 | |||
751f6b6148 | |||
3c430e9a55 | |||
155f657f6b | |||
86fb38776b | |||
f323e90602 | |||
770a36e53a | |||
d420962fbc | |||
01fd2447b2 | |||
85beb7d875 | |||
af06decd1b | |||
aceae32baa | |||
74a4f9efc9 | |||
fb1e7a86f4 | |||
dc99315cf9 | |||
34bd1109b0 | |||
13a2445744 | |||
c968da789e | |||
3f84541412 | |||
4d8bd03668 | |||
f9bd5e1691 | |||
ecd66ecaf6 | |||
33d7292f29 | |||
f4d371d2d2 | |||
835d0e5dd3 | |||
9a06eb1618 | |||
309e14ebb7 | |||
2d48533378 | |||
fffd6874e6 | |||
0ddd48f0b5 | |||
cb590dbc07 | |||
6c4f762c49 | |||
7a0afee391 | |||
0dda883994 | |||
c2e2078b3f | |||
26a3450f19 | |||
324c069848 | |||
bd4c5607ca | |||
e1d85f1840 | |||
1ce1a5e5cc | |||
6f66a0ca71 | |||
62a5b3907b | |||
85b6c4ead4 | |||
a190979c04 | |||
4a489ae3de | |||
9ac8b73e07 | |||
414be8b675 | |||
fda19dcc6f | |||
cd975e5787 | |||
3b7b1dfb8e | |||
d8a47ec649 | |||
252cd3b781 | |||
0decd11efb | |||
b84d2592fb | |||
0219ba2cc5 | |||
bbff6c4968 | |||
bb88c6a29d | |||
a02466966d | |||
b0fc11804e | |||
d9d81741e3 | |||
9678366102 | |||
a2c73c78dd | |||
c6a0e7d98e | |||
85417b2a88 | |||
d738669066 | |||
442d6da8fb | |||
62f10a01db | |||
5667b76381 | |||
d9b318a444 | |||
86ce56f193 | |||
8d72c2c32e | |||
c48c38ab8c | |||
3d3769830b | |||
4921a411ad | |||
81c767efce | |||
60abf03f05 | |||
dcbf29e71b | |||
037e6c0ca8 | |||
c7024b282a | |||
90ff75f85c | |||
2165f0d450 | |||
1e7639bfc4 | |||
4121628d99 | |||
da78b90f9c | |||
1ef6e8b6a7 | |||
10351f7075 | |||
70a152deb7 | |||
5446bfbba8 | |||
400885e620 | |||
f960fc3b6f | |||
ddfa4d679a | |||
10e8026786 | |||
2527c039df | |||
93d8a2044e | |||
d2354a16cd | |||
34ee1f1c76 | |||
2de4dc3a81 | |||
b90036dadd | |||
4708f4fc21 | |||
062cf75cdf | |||
e5950360ca | |||
5b358ff0b1 | |||
4c00391d78 | |||
9594362e35 | |||
3420029b5e | |||
f432a1c927 | |||
e8b32f2d87 | |||
3e3b505cc8 | |||
0bca966ec5 | |||
84737fb33f | |||
e21a15ab17 | |||
90066d22a0 | |||
dbf5dad1c4 | |||
c793da1edc | |||
f8735e5988 | |||
e9805b2486 | |||
eb90405a78 | |||
ecf5f468c3 | |||
51aee8cac8 | |||
7d5049c350 | |||
01a99f5651 | |||
2914e99ff3 | |||
f9b824ac30 | |||
9a535ec77b | |||
ffba023c91 | |||
e01689978e | |||
68ac8976eb | |||
afb790db73 | |||
0732de361a | |||
d455270fa1 | |||
1336be16c9 | |||
03380db560 | |||
927ebc702c | |||
c24cb13382 | |||
3a804a8a20 | |||
1fde4167ea | |||
75f9f40922 | |||
e9c2638f90 | |||
338c545f85 | |||
e379b4a31c | |||
3d7ca2bdb9 | |||
d34019e246 | |||
7cb2ebba79 | |||
4e8581950e | |||
2a9a3d632e | |||
b6d07fa038 | |||
4599e7959c | |||
82ed13c7d7 | |||
5aaa81ab89 | |||
8a06d1935e | |||
f44254b4bd | |||
07875ce13e | |||
98dc770efa | |||
8848f1d487 | |||
5128ae48a0 | |||
104ae6093a | |||
e830d63f6a | |||
ce32cd487a | |||
f36c659365 | |||
47e5cbdb03 | |||
4923a76f22 | |||
e01ca6a2dd | |||
5e989333cd | |||
af39c399bc | |||
64591e731e | |||
5658504b90 | |||
64e0786aa9 | |||
90761f0f62 | |||
74f74d1e64 | |||
4db4b9706c | |||
00a5072ad3 | |||
3d3d698bb3 | |||
1b9521bb87 | |||
1d781c5b20 | |||
8e8836d1ea | |||
a904e3755d | |||
7ba99fef86 | |||
7d2be91bc9 | |||
578895336a | |||
8c090937f5 | |||
4229633d98 | |||
3ed7e87538 | |||
5b43cc4487 | |||
3241392117 | |||
c474a66b41 | |||
b32cf6a1e0 | |||
f32791b4b2 | |||
8f33fe8e59 | |||
d19010481d | |||
6b11524a8b | |||
e953029e8f | |||
10f788b7eb | |||
9348544e46 | |||
126ccbcfa6 | |||
440472cb32 | |||
4ce7da516d | |||
a7f8efcf35 | |||
9fe4c79005 | |||
f09f4d5fd5 | |||
38b4f9b534 | |||
fca1cef29f | |||
45b8a0327f | |||
a723c08715 | |||
c381a162fb | |||
b4931192c3 | |||
cc269b9ff9 | |||
a5e3be4992 | |||
137309cc4e | |||
85f4e834d8 | |||
065013ccec | |||
56d98ba966 | |||
dda1b4fa44 | |||
68b102269f | |||
0ecdaa0dc0 | |||
13f435caab | |||
ff99780303 | |||
fa9507020a | |||
1bff50afea | |||
37ff72720b | |||
2d5d264f99 | |||
c9c07445b7 | |||
a4388ffc36 | |||
ea1458923e | |||
e857f1fae8 | |||
3ec42e81b1 | |||
be1163acfe | |||
d308dc8af7 | |||
60643023ad | |||
875d53ef6c | |||
b41f9e9fec | |||
a1b71c3c7d | |||
013fa2d886 | |||
72e311c6b2 | |||
2732c47466 | |||
0466089316 | |||
5e42d38598 | |||
82a4bb5e80 | |||
94bc7957c1 | |||
c9e6b07145 | |||
3c06eba17a | |||
8081e4aa7b | |||
d8769d659e | |||
572cd0381b | |||
5e91b40087 | |||
936eceda61 | |||
61c4087041 | |||
7d39e47182 | |||
c4e1af3069 | |||
3e234af16e | |||
bbbf662d20 | |||
25d78b1068 | |||
78bf292343 | |||
e5ef69ecf7 | |||
b7b9a57425 | |||
c4a04b7c62 | |||
2e41dbe828 | |||
56d36ca439 | |||
e0ba5553be | |||
8d6fb677c1 | |||
a2daecc25d | |||
ee0c5c8e01 | |||
ae5b1e188f | |||
49f9aca627 | |||
4cba875379 | |||
7ab4382476 | |||
eaef6c8d00 | |||
95f3692545 | |||
686173dc2a | |||
39c5db7f0f | |||
603aa09d54 | |||
88aa3076f0 | |||
5400fe171c | |||
87bf9f569f | |||
8fb24a2c0a | |||
4b5d9b6e64 | |||
72bd8293e3 | |||
09989d9963 | |||
4088d5bc62 | |||
d4b84c1dec | |||
426847e1ce | |||
79b902d512 | |||
73c607497e | |||
f2f526b61d | |||
cb67ecaddb | |||
5bf9b0b0bb | |||
7a61f89e5a | |||
671c6a96e7 | |||
f0d23e5370 | |||
d1bee4344d | |||
d724116c0c | |||
888d89e2dd | |||
a6471bc346 | |||
6b1da1c166 | |||
18210d8958 | |||
bc5c1a9aa6 | |||
3df77ef5da | |||
e8d9d9adfa | |||
01d152720f | |||
5e58381ea9 | |||
0b6d9442bd | |||
134ed9e14f | |||
0796b642de | |||
f912ba6a3e | |||
a576e6685b | |||
b1c793cfa5 | |||
c0147e49c4 | |||
d52b120905 | |||
84c8a580b5 | |||
467bd01cdf | |||
7a7fcb4715 | |||
cf8e44bc30 | |||
279e7eb497 | |||
606828cc65 | |||
aac424674c | |||
8fd1e10830 | |||
12509a6d9e | |||
5e169f387c | |||
8369ade880 | |||
73cef112eb | |||
4a0132382a | |||
6ee69fccd3 | |||
a862835be2 | |||
ddbd63ed5f | |||
6a59fa0e18 | |||
1ed9069ad3 | |||
a588b67906 | |||
37a634f550 | |||
951fe0cb7d | |||
4ca3f0c6ae | |||
69e5ba29c4 | |||
e045d154e9 | |||
6526709d48 | |||
603f80d813 | |||
398636b61c | |||
eb70464839 | |||
75054859ff | |||
8e898895cc | |||
4be6beab6f | |||
a3b4b5b50e | |||
33b8d7e5e8 | |||
f2f43e1904 | |||
c002d48b0c | |||
15998ed12a | |||
9d8ab62769 | |||
3526a76ef3 | |||
b9e0fcbdcd | |||
a7188b3a75 | |||
b6c06dce9d | |||
4adf47b606 | |||
4d0dc29951 | |||
1011fb552b | |||
2fd2d29281 | |||
9104152a83 | |||
02a58862dd | |||
26153589ba | |||
17b3e4451f | |||
a2072cc346 | |||
fea23d0323 | |||
71e83e1b1f | |||
28570d19a6 | |||
1369bcdbba | |||
5e4d81e957 | |||
0f4721f305 | |||
5547f90ba7 | |||
2e1b63fb25 | |||
7b2d3a5fe9 | |||
0216f56241 | |||
80acdd71fa | |||
26af61debc | |||
e7f94010d3 | |||
a4e871f52c | |||
bc3072ef7a | |||
f4bb2510b9 | |||
2ab12cd0cb | |||
c894909e17 | |||
7f394c807b | |||
7afb98a912 | |||
3847008e1b | |||
f6ed2eff47 | |||
23eed6755a | |||
384a2b4d4f | |||
910177a388 | |||
54311a38c6 | |||
983edbc54a | |||
10439718e2 | |||
ebddccef5f | |||
9cfe0ff350 | |||
295bae14b7 | |||
53939bb438 | |||
329c2cbe66 | |||
55334cf45a | |||
a2e30cd51d | |||
4bf2ab1109 | |||
1dd1c9eb5c | |||
6dde015f8c | |||
5f3b2330c8 | |||
4ba5d3b3dd | |||
e7e3d7360a | |||
fd8b00aed7 | |||
2631e57d20 | |||
90461b76fb | |||
629103d60c | |||
dc232b8946 | |||
6fed819dc2 | |||
646fc7f086 | |||
ecc5602c88 | |||
6a15cce540 | |||
f281b8d3a9 | |||
4465b76812 | |||
61df02cda1 | |||
3b0321365b | |||
0dfce17a43 | |||
a38dccf0e8 | |||
f05085ab22 | |||
bc42bb3c6e | |||
94b7f56e65 | |||
0417e9af1b | |||
ce5327badc | |||
368f4c5416 | |||
318b310638 | |||
164ad7b706 | |||
a5322f3c50 | |||
fa29d7eb49 | |||
a21f9852fd | |||
79e2473c63 | |||
375b1f6150 | |||
109ccd300f | |||
c287b28725 | |||
c560cfddca | |||
44f6bb019c | |||
d6d42702d1 | |||
3fafd0e2a1 | |||
59648eac3d | |||
5b6b5bba68 | |||
b13089cdf5 | |||
1f03196c0b | |||
edf0940649 | |||
801ec1dbf9 | |||
34ac5cd889 | |||
58421ec112 | |||
a5bdc987dc | |||
d32a8652bd | |||
a26ebad5f9 | |||
dd9cef56fc | |||
26858dba84 | |||
9fe3358ce6 | |||
76425d84b3 | |||
42355b11a4 | |||
511e4f6987 | |||
3f0e344bc1 | |||
a316178768 | |||
dff8ea92aa | |||
88e1f7997c | |||
4c3eabeaf3 | |||
4c7be5f59d | |||
6d4fbbc3ea | |||
1a23132262 | |||
48c4193f7c | |||
8204d9b095 | |||
fad95a334a | |||
973e985d73 | |||
e5a13382b2 | |||
81c0b90447 | |||
ee9fa953de | |||
09acf0a70d | |||
15d1435789 | |||
80ea23e1b9 | |||
5d6379f8db | |||
566b946f9b | |||
7f7459677d | |||
0892a512bc | |||
b717871d2a | |||
7b11a8098d | |||
8b2c6f5dbc | |||
d26985a600 | |||
e29f456efc | |||
a79082a0dd | |||
1336ae8249 | |||
0db5712493 | |||
c47609fedb | |||
b84e8aaee9 | |||
d84e4073af | |||
e8656da70d | |||
59477ad252 | |||
2f29f1c765 | |||
4d84e869bf | |||
79d841014e | |||
ea62611d8e | |||
f3c867a034 | |||
aae5db916e | |||
a417c8a93e | |||
79e58a903e | |||
9f40e09d0a | |||
553e57f914 | |||
2200a38671 | |||
ba39ab20fb | |||
ff8945fd2f | |||
4876393562 | |||
971bc6f94b | |||
cab92acb3c | |||
a1d90719e4 | |||
eeff085d9d | |||
d43c407a00 | |||
6bc87d3952 | |||
04c1c68f31 | |||
94b17c804a | |||
94352256b7 | |||
b3bed7e41f | |||
a4672dd0b1 | |||
17bbcb57d7 | |||
843146479a | |||
cf1e117fc7 | |||
03eac20b87 | |||
11f5d59396 | |||
6f63c29306 | |||
c0e365fd49 | |||
93fb2e0d21 | |||
c553407e98 | |||
4830de408b | |||
7f78528308 | |||
2843ba9017 | |||
e244b9d03d | |||
657c47db35 | |||
a32bb86df9 | |||
654c56e05d | |||
589c4dad9e | |||
0320deb0a9 | |||
4c4e5c2b1e | |||
924373d2df | |||
3b60b5098f | |||
4abb3edd9f | |||
932e69a837 | |||
ef6d49670b | |||
52ea00e9df | |||
870681013a | |||
c046739461 | |||
8b1289f3e4 | |||
f1d76ecf6c | |||
074503f288 | |||
c6f55139f8 | |||
20cc25d749 | |||
30316192b3 | |||
e93263be1e | |||
2ab2ca9c24 | |||
54fcb7f5d8 | |||
4abd4dbe38 | |||
eac1beef3c | |||
166a48f903 | |||
82775c4764 | |||
88bc9635aa | |||
1037f2bc2d | |||
f24cbee77d | |||
25b4d52dce | |||
2729d134bd | |||
32b75d36a8 |
69
Cargo.toml
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "proxmox-backup"
|
name = "proxmox-backup"
|
||||||
version = "1.0.11"
|
version = "2.0.10"
|
||||||
authors = [
|
authors = [
|
||||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||||
@ -18,6 +18,26 @@ homepage = "https://www.proxmox.com"
|
|||||||
|
|
||||||
exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
|
exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
|
||||||
|
|
||||||
|
[workspace]
|
||||||
|
members = [
|
||||||
|
"pbs-buildcfg",
|
||||||
|
"pbs-client",
|
||||||
|
"pbs-config",
|
||||||
|
"pbs-datastore",
|
||||||
|
"pbs-fuse-loop",
|
||||||
|
"pbs-runtime",
|
||||||
|
"proxmox-rest-server",
|
||||||
|
"proxmox-systemd",
|
||||||
|
"pbs-tape",
|
||||||
|
"pbs-tools",
|
||||||
|
|
||||||
|
"proxmox-backup-banner",
|
||||||
|
"proxmox-backup-client",
|
||||||
|
"proxmox-file-restore",
|
||||||
|
"proxmox-restore-daemon",
|
||||||
|
"pxar-bin",
|
||||||
|
]
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "proxmox_backup"
|
name = "proxmox_backup"
|
||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
@ -29,7 +49,10 @@ bitflags = "1.2.1"
|
|||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
crc32fast = "1"
|
crc32fast = "1"
|
||||||
endian_trait = { version = "0.6", features = ["arrays"] }
|
endian_trait = { version = "0.6", features = ["arrays"] }
|
||||||
|
env_logger = "0.7"
|
||||||
|
flate2 = "1.0"
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
|
thiserror = "1.0"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
h2 = { version = "0.3", features = [ "stream" ] }
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
handlebars = "3.0"
|
handlebars = "3.0"
|
||||||
@ -45,25 +68,16 @@ openssl = "0.10"
|
|||||||
pam = "0.7"
|
pam = "0.7"
|
||||||
pam-sys = "0.5"
|
pam-sys = "0.5"
|
||||||
percent-encoding = "2.1"
|
percent-encoding = "2.1"
|
||||||
pin-utils = "0.1.0"
|
|
||||||
pin-project = "1.0"
|
|
||||||
pathpatterns = "0.1.2"
|
|
||||||
proxmox = { version = "0.11.0", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
|
||||||
#proxmox = { git = "git://git.proxmox.com/git/proxmox", version = "0.1.2", features = [ "sortable-macro", "api-macro" ] }
|
|
||||||
#proxmox = { path = "../proxmox/proxmox", features = [ "sortable-macro", "api-macro", "websocket" ] }
|
|
||||||
proxmox-fuse = "0.1.1"
|
|
||||||
pxar = { version = "0.10.0", features = [ "tokio-io" ] }
|
|
||||||
#pxar = { path = "../pxar", features = [ "tokio-io" ] }
|
|
||||||
regex = "1.2"
|
regex = "1.2"
|
||||||
rustyline = "7"
|
rustyline = "7"
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
siphasher = "0.3"
|
siphasher = "0.3"
|
||||||
syslog = "4.0"
|
syslog = "4.0"
|
||||||
tokio = { version = "1.0", features = [ "fs", "io-util", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
|
tokio = { version = "1.6", features = [ "fs", "io-util", "io-std", "macros", "net", "parking_lot", "process", "rt", "rt-multi-thread", "signal", "time" ] }
|
||||||
tokio-openssl = "0.6.1"
|
tokio-openssl = "0.6.1"
|
||||||
tokio-stream = "0.1.0"
|
tokio-stream = "0.1.0"
|
||||||
tokio-util = { version = "0.6", features = [ "codec" ] }
|
tokio-util = { version = "0.6", features = [ "codec", "io" ] }
|
||||||
tower-service = "0.3.0"
|
tower-service = "0.3.0"
|
||||||
udev = ">= 0.3, <0.5"
|
udev = ">= 0.3, <0.5"
|
||||||
url = "2.1"
|
url = "2.1"
|
||||||
@ -71,10 +85,39 @@ url = "2.1"
|
|||||||
walkdir = "2"
|
walkdir = "2"
|
||||||
webauthn-rs = "0.2.5"
|
webauthn-rs = "0.2.5"
|
||||||
xdg = "2.2"
|
xdg = "2.2"
|
||||||
zstd = { version = "0.4", features = [ "bindgen" ] }
|
|
||||||
nom = "5.1"
|
nom = "5.1"
|
||||||
crossbeam-channel = "0.5"
|
crossbeam-channel = "0.5"
|
||||||
|
|
||||||
|
# Used only by examples currently:
|
||||||
|
zstd = { version = "0.6", features = [ "bindgen" ] }
|
||||||
|
|
||||||
|
pathpatterns = "0.1.2"
|
||||||
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
|
proxmox = { version = "0.13.3", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] }
|
||||||
|
proxmox-acme-rs = "0.2.1"
|
||||||
|
proxmox-apt = "0.7.0"
|
||||||
|
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
proxmox-openid = "0.7.0"
|
||||||
|
|
||||||
|
pbs-api-types = { path = "pbs-api-types" }
|
||||||
|
pbs-buildcfg = { path = "pbs-buildcfg" }
|
||||||
|
pbs-client = { path = "pbs-client" }
|
||||||
|
pbs-config = { path = "pbs-config" }
|
||||||
|
pbs-datastore = { path = "pbs-datastore" }
|
||||||
|
pbs-runtime = { path = "pbs-runtime" }
|
||||||
|
proxmox-rest-server = { path = "proxmox-rest-server" }
|
||||||
|
proxmox-systemd = { path = "proxmox-systemd" }
|
||||||
|
pbs-tools = { path = "pbs-tools" }
|
||||||
|
pbs-tape = { path = "pbs-tape" }
|
||||||
|
|
||||||
|
# Local path overrides
|
||||||
|
# NOTE: You must run `cargo update` after changing this for it to take effect!
|
||||||
|
[patch.crates-io]
|
||||||
|
#proxmox = { path = "../proxmox/proxmox" }
|
||||||
|
#proxmox-http = { path = "../proxmox/proxmox-http" }
|
||||||
|
#pxar = { path = "../pxar" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = []
|
default = []
|
||||||
#valgrind = ["valgrind_request"]
|
#valgrind = ["valgrind_request"]
|
||||||
|
126
Makefile
@ -9,6 +9,7 @@ SUBDIRS := etc www docs
|
|||||||
# Binaries usable by users
|
# Binaries usable by users
|
||||||
USR_BIN := \
|
USR_BIN := \
|
||||||
proxmox-backup-client \
|
proxmox-backup-client \
|
||||||
|
proxmox-file-restore \
|
||||||
pxar \
|
pxar \
|
||||||
proxmox-tape \
|
proxmox-tape \
|
||||||
pmtx \
|
pmtx \
|
||||||
@ -16,7 +17,8 @@ USR_BIN := \
|
|||||||
|
|
||||||
# Binaries usable by admins
|
# Binaries usable by admins
|
||||||
USR_SBIN := \
|
USR_SBIN := \
|
||||||
proxmox-backup-manager
|
proxmox-backup-manager \
|
||||||
|
proxmox-backup-debug \
|
||||||
|
|
||||||
# Binaries for services:
|
# Binaries for services:
|
||||||
SERVICE_BIN := \
|
SERVICE_BIN := \
|
||||||
@ -25,6 +27,28 @@ SERVICE_BIN := \
|
|||||||
proxmox-backup-proxy \
|
proxmox-backup-proxy \
|
||||||
proxmox-daily-update
|
proxmox-daily-update
|
||||||
|
|
||||||
|
# Single file restore daemon
|
||||||
|
RESTORE_BIN := \
|
||||||
|
proxmox-restore-daemon
|
||||||
|
|
||||||
|
SUBCRATES := \
|
||||||
|
pbs-api-types \
|
||||||
|
pbs-buildcfg \
|
||||||
|
pbs-client \
|
||||||
|
pbs-config \
|
||||||
|
pbs-datastore \
|
||||||
|
pbs-fuse-loop \
|
||||||
|
pbs-runtime \
|
||||||
|
proxmox-rest-server \
|
||||||
|
proxmox-systemd \
|
||||||
|
pbs-tape \
|
||||||
|
pbs-tools \
|
||||||
|
proxmox-backup-banner \
|
||||||
|
proxmox-backup-client \
|
||||||
|
proxmox-file-restore \
|
||||||
|
proxmox-restore-daemon \
|
||||||
|
pxar-bin
|
||||||
|
|
||||||
ifeq ($(BUILD_MODE), release)
|
ifeq ($(BUILD_MODE), release)
|
||||||
CARGO_BUILD_ARGS += --release
|
CARGO_BUILD_ARGS += --release
|
||||||
COMPILEDIR := target/release
|
COMPILEDIR := target/release
|
||||||
@ -39,7 +63,7 @@ endif
|
|||||||
CARGO ?= cargo
|
CARGO ?= cargo
|
||||||
|
|
||||||
COMPILED_BINS := \
|
COMPILED_BINS := \
|
||||||
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
|
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
|
||||||
|
|
||||||
export DEB_VERSION DEB_VERSION_UPSTREAM
|
export DEB_VERSION DEB_VERSION_UPSTREAM
|
||||||
|
|
||||||
@ -47,15 +71,20 @@ SERVER_DEB=${PACKAGE}-server_${DEB_VERSION}_${ARCH}.deb
|
|||||||
SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
SERVER_DBG_DEB=${PACKAGE}-server-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
||||||
CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
|
CLIENT_DEB=${PACKAGE}-client_${DEB_VERSION}_${ARCH}.deb
|
||||||
CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
CLIENT_DBG_DEB=${PACKAGE}-client-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
||||||
|
RESTORE_DEB=proxmox-backup-file-restore_${DEB_VERSION}_${ARCH}.deb
|
||||||
|
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_${DEB_VERSION}_${ARCH}.deb
|
||||||
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
|
DOC_DEB=${PACKAGE}-docs_${DEB_VERSION}_all.deb
|
||||||
|
|
||||||
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB}
|
DEBS=${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
|
||||||
|
${RESTORE_DEB} ${RESTORE_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB}
|
||||||
|
|
||||||
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
|
DSC = rust-${PACKAGE}_${DEB_VERSION}.dsc
|
||||||
|
|
||||||
DESTDIR=
|
DESTDIR=
|
||||||
|
|
||||||
all: cargo-build $(SUBDIRS)
|
tests ?= --workspace
|
||||||
|
|
||||||
|
all: $(SUBDIRS)
|
||||||
|
|
||||||
.PHONY: $(SUBDIRS)
|
.PHONY: $(SUBDIRS)
|
||||||
$(SUBDIRS):
|
$(SUBDIRS):
|
||||||
@ -67,19 +96,23 @@ test:
|
|||||||
$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
|
$(CARGO) test $(tests) $(CARGO_BUILD_ARGS)
|
||||||
|
|
||||||
doc:
|
doc:
|
||||||
$(CARGO) doc --no-deps $(CARGO_BUILD_ARGS)
|
$(CARGO) doc --workspace --no-deps $(CARGO_BUILD_ARGS)
|
||||||
|
|
||||||
# always re-create this dir
|
# always re-create this dir
|
||||||
.PHONY: build
|
.PHONY: build
|
||||||
build:
|
build:
|
||||||
|
@echo "Setting pkg-buildcfg version to: $(DEB_VERSION_UPSTREAM)"
|
||||||
|
sed -i -e 's/^version =.*$$/version = "$(DEB_VERSION_UPSTREAM)"/' \
|
||||||
|
pbs-buildcfg/Cargo.toml
|
||||||
rm -rf build
|
rm -rf build
|
||||||
rm -f debian/control
|
mkdir build
|
||||||
debcargo package --config debian/debcargo.toml --changelog-ready --no-overlay-write-back --directory build proxmox-backup $(shell dpkg-parsechangelog -l debian/changelog -SVersion | sed -e 's/-.*//')
|
cp -a debian \
|
||||||
sed -e '1,/^$$/ ! d' build/debian/control > build/debian/control.src
|
Cargo.toml src \
|
||||||
cat build/debian/control.src build/debian/control.in > build/debian/control
|
$(SUBCRATES) \
|
||||||
rm build/debian/control.in build/debian/control.src
|
docs etc examples tests www zsh-completions \
|
||||||
cp build/debian/control debian/control
|
defines.mk Makefile \
|
||||||
rm build/Cargo.lock
|
./build/
|
||||||
|
rm -f build/Cargo.lock
|
||||||
find build/debian -name "*.hint" -delete
|
find build/debian -name "*.hint" -delete
|
||||||
$(foreach i,$(SUBDIRS), \
|
$(foreach i,$(SUBDIRS), \
|
||||||
$(MAKE) -C build/$(i) clean ;)
|
$(MAKE) -C build/$(i) clean ;)
|
||||||
@ -99,7 +132,9 @@ deb: build
|
|||||||
lintian $(DEBS)
|
lintian $(DEBS)
|
||||||
|
|
||||||
.PHONY: deb-all
|
.PHONY: deb-all
|
||||||
deb-all: $(DOC_DEB) $(DEBS)
|
deb-all: build
|
||||||
|
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
|
||||||
|
lintian $(DEBS) $(DOC_DEB)
|
||||||
|
|
||||||
.PHONY: dsc
|
.PHONY: dsc
|
||||||
dsc: $(DSC)
|
dsc: $(DSC)
|
||||||
@ -107,27 +142,61 @@ $(DSC): build
|
|||||||
cd build; dpkg-buildpackage -S -us -uc -d -nc
|
cd build; dpkg-buildpackage -S -us -uc -d -nc
|
||||||
lintian $(DSC)
|
lintian $(DSC)
|
||||||
|
|
||||||
|
.PHONY: clean distclean deb clean
|
||||||
distclean: clean
|
distclean: clean
|
||||||
|
clean: clean-deb
|
||||||
clean:
|
|
||||||
$(foreach i,$(SUBDIRS), \
|
$(foreach i,$(SUBDIRS), \
|
||||||
$(MAKE) -C $(i) clean ;)
|
$(MAKE) -C $(i) clean ;)
|
||||||
$(CARGO) clean
|
$(CARGO) clean
|
||||||
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build
|
rm -f .do-cargo-build
|
||||||
find . -name '*~' -exec rm {} ';'
|
find . -name '*~' -exec rm {} ';'
|
||||||
|
|
||||||
|
# allows one to avoid running cargo clean when one just wants to tidy up after a packgae build
|
||||||
|
clean-deb:
|
||||||
|
rm -rf *.deb *.dsc *.tar.gz *.buildinfo *.changes build/
|
||||||
|
|
||||||
.PHONY: dinstall
|
.PHONY: dinstall
|
||||||
dinstall: ${DEBS}
|
dinstall: ${SERVER_DEB} ${SERVER_DBG_DEB} ${CLIENT_DEB} ${CLIENT_DBG_DEB} \
|
||||||
dpkg -i ${DEBS}
|
${DEBUG_DEB} ${DEBUG_DBG_DEB}
|
||||||
|
dpkg -i $^
|
||||||
|
|
||||||
# make sure we build binaries before docs
|
# make sure we build binaries before docs
|
||||||
docs: cargo-build
|
docs: $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen
|
||||||
|
|
||||||
.PHONY: cargo-build
|
.PHONY: cargo-build
|
||||||
cargo-build:
|
cargo-build:
|
||||||
$(CARGO) build $(CARGO_BUILD_ARGS)
|
rm -f .do-cargo-build
|
||||||
|
$(MAKE) $(COMPILED_BINS)
|
||||||
|
|
||||||
|
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
|
||||||
|
.do-cargo-build:
|
||||||
|
$(CARGO) build $(CARGO_BUILD_ARGS) \
|
||||||
|
--bin proxmox-backup-api \
|
||||||
|
--bin proxmox-backup-proxy \
|
||||||
|
--bin proxmox-backup-manager \
|
||||||
|
--bin docgen \
|
||||||
|
--package proxmox-backup-banner \
|
||||||
|
--bin proxmox-backup-banner \
|
||||||
|
--package proxmox-backup-client \
|
||||||
|
--bin proxmox-backup-client \
|
||||||
|
--bin proxmox-backup-debug \
|
||||||
|
--package proxmox-file-restore \
|
||||||
|
--bin proxmox-file-restore \
|
||||||
|
--package pxar-bin \
|
||||||
|
--bin pxar \
|
||||||
|
--package pbs-tape \
|
||||||
|
--bin pmt \
|
||||||
|
--bin pmtx \
|
||||||
|
--package proxmox-restore-daemon \
|
||||||
|
--bin proxmox-restore-daemon \
|
||||||
|
--package proxmox-backup \
|
||||||
|
--bin dump-catalog-shell-cli \
|
||||||
|
--bin proxmox-daily-update \
|
||||||
|
--bin proxmox-file-restore \
|
||||||
|
--bin proxmox-tape \
|
||||||
|
--bin sg-tape-cmd
|
||||||
|
touch "$@"
|
||||||
|
|
||||||
$(COMPILED_BINS): cargo-build
|
|
||||||
|
|
||||||
.PHONY: lint
|
.PHONY: lint
|
||||||
lint:
|
lint:
|
||||||
@ -144,16 +213,25 @@ install: $(COMPILED_BINS)
|
|||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
|
||||||
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
|
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
|
||||||
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
|
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
|
||||||
|
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
|
||||||
|
$(foreach i,$(RESTORE_BIN), \
|
||||||
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
|
||||||
# install sg-tape-cmd as setuid binary
|
# install sg-tape-cmd as setuid binary
|
||||||
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
|
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
|
||||||
$(foreach i,$(SERVICE_BIN), \
|
$(foreach i,$(SERVICE_BIN), \
|
||||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
||||||
$(MAKE) -C www install
|
$(MAKE) -C www install
|
||||||
$(MAKE) -C docs install
|
$(MAKE) -C docs install
|
||||||
|
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
|
||||||
|
$(MAKE) test # HACK, only test now to avoid clobbering build files with wrong config
|
||||||
|
endif
|
||||||
|
|
||||||
.PHONY: upload
|
.PHONY: upload
|
||||||
upload: ${SERVER_DEB} ${CLIENT_DEB} ${DOC_DEB}
|
upload: ${SERVER_DEB} ${CLIENT_DEB} ${RESTORE_DEB} ${DOC_DEB} ${DEBUG_DEB}
|
||||||
# check if working directory is clean
|
# check if working directory is clean
|
||||||
git diff --exit-code --stat && git diff --exit-code --stat --staged
|
git diff --exit-code --stat && git diff --exit-code --stat --staged
|
||||||
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} | ssh -X repoman@repo.proxmox.com upload --product pbs --dist buster
|
tar cf - ${SERVER_DEB} ${SERVER_DBG_DEB} ${DOC_DEB} ${CLIENT_DEB} \
|
||||||
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pbs,pve,pmg" --dist buster
|
${CLIENT_DBG_DEB} ${DEBUG_DEB} ${DEBUG_DBG_DEB} \
|
||||||
|
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist bullseye
|
||||||
|
tar cf - ${CLIENT_DEB} ${CLIENT_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist bullseye
|
||||||
|
tar cf - ${RESTORE_DEB} ${RESTORE_DBG_DEB} | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist bullseye
|
||||||
|
545
debian/changelog
vendored
@ -1,3 +1,548 @@
|
|||||||
|
rust-proxmox-backup (2.0.10-1) UNRELEASED; urgency=medium
|
||||||
|
|
||||||
|
* ui: fix order of prune keep reasons
|
||||||
|
|
||||||
|
* server: add proxmox-backup-debug binary with chunk/file inspection, an API
|
||||||
|
shell with completion support
|
||||||
|
|
||||||
|
* restructured code base to reduce linkage and libraray ABI version
|
||||||
|
constraints for all non-server binaries (client, pxar, file-restore)
|
||||||
|
|
||||||
|
* zsh: fix passign parameters in auto-completion scripts
|
||||||
|
|
||||||
|
* tape: also add 'force-media-set' to availablea CLI options
|
||||||
|
|
||||||
|
* api: nodes: add missing node list (index) api endpoint
|
||||||
|
|
||||||
|
* docs: proxmox-backup-debug: add info about the new 'api' subcommand
|
||||||
|
|
||||||
|
* docs/technical-overview: add troubleshooting section
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 21 Sep 2021 14:00:48 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.9-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* tape backup: mention groups that were empty
|
||||||
|
|
||||||
|
* tape: compute next-media-label for each tape backup job
|
||||||
|
|
||||||
|
* tape: lto: increase default timeout to 10 minutes
|
||||||
|
|
||||||
|
* ui: display next-media-label for tape backup jobs
|
||||||
|
|
||||||
|
* cli: proxmox-tape backup-job list: use status api and display next-run
|
||||||
|
and next-media-label
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 24 Aug 2021 14:44:12 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.8-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* use proxmox-apt to 0.6
|
||||||
|
|
||||||
|
* api: apt: adapt to proxmox-apt back-end changes
|
||||||
|
|
||||||
|
* api/ui: allow zstd compression for new zpools
|
||||||
|
|
||||||
|
* tape: media_catalog: add snapshot list cache for catalog
|
||||||
|
|
||||||
|
* api2: tape: media: use MediaCatalog::snapshot_list for content listing
|
||||||
|
|
||||||
|
* tape: lock media_catalog file to to get a consistent view with load_catalog
|
||||||
|
|
||||||
|
* tape: changer: handle libraries that sends wrong amount of data
|
||||||
|
|
||||||
|
* tape: changer: remove unnecesary inquiry parameter
|
||||||
|
|
||||||
|
* api2: tape/restore: commit temporary catalog at the end
|
||||||
|
|
||||||
|
* docs: tape: add instructions on how to restore the catalog
|
||||||
|
|
||||||
|
* ui: tape/ChangerStatus: improve layout for large libraries
|
||||||
|
|
||||||
|
* tape: changer: handle invalid descriptor data from library in status page
|
||||||
|
|
||||||
|
* datastore config: cleanup code (use flatten attribute)
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 02 Aug 2021 10:34:55 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.7-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* tape changer: better cope with models that are not following spec
|
||||||
|
proposals when returning the status page
|
||||||
|
|
||||||
|
* tape changer: make DVCID information optional, not all devices return it
|
||||||
|
|
||||||
|
* restore daemon: setup the 'backup' system user and group in the minimal
|
||||||
|
restore environment, as we like to ensure that all state files are ownend
|
||||||
|
by them.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 23 Jul 2021 08:43:51 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.6-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* increase maximum drives per changer to 255
|
||||||
|
|
||||||
|
* allow one to pass a secret not only directly through the environment value,
|
||||||
|
but also indirectly through a file path, an open file descriptor or a
|
||||||
|
command that can write the secret to standard out.
|
||||||
|
|
||||||
|
* pull in new proxmox library version to improve the file system
|
||||||
|
comaptibility on creation of atomic files, e.g., lock files.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 22 Jul 2021 10:22:19 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.5-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* ui: tape: backup overview: increase timeout for media-set content
|
||||||
|
|
||||||
|
* tape: changer: always retry until timeout
|
||||||
|
|
||||||
|
* file-restore: increase lock timeout on QEMU map
|
||||||
|
|
||||||
|
* fix #3515: file-restore-daemon: allow LVs/PVs with dash in name
|
||||||
|
|
||||||
|
* fix #3526: correctly filter tasks with 'since' and 'until'
|
||||||
|
|
||||||
|
* tape: changer: make scsi request for DVCID a separate one, as some
|
||||||
|
libraries cannot handle requesting that combined with volume tags in one
|
||||||
|
go
|
||||||
|
|
||||||
|
* api, ui: datastore: add new 'prune-datastore' api call and expose it with
|
||||||
|
a 'Prune All' button
|
||||||
|
|
||||||
|
* make creating log files more robust so that theys are always owned by the
|
||||||
|
less privileged `backup` user
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 21 Jul 2021 09:12:39 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.4-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* change tape drive lock path to avoid issues with sticky bit on tmpfs
|
||||||
|
mountpoint
|
||||||
|
|
||||||
|
* tape: changer: query transport-element types separately
|
||||||
|
|
||||||
|
* auth: improve thread safety of 'crypt' C-library
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 18:51:21 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.3-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* api: apt: add repositories info and update calls
|
||||||
|
|
||||||
|
* ui: administration: add APT repositories status and update panel
|
||||||
|
|
||||||
|
* api: access domains: add get/create/update/delete endpoints for realms
|
||||||
|
|
||||||
|
* ui: access control: add 'Realm' tab for adding and editing OpenID Connect
|
||||||
|
identity provider
|
||||||
|
|
||||||
|
* fix #3447: ui: Dashboard: disallow selection of datastore statistics row
|
||||||
|
|
||||||
|
* ui: tapeRestore: make window non-resizable
|
||||||
|
|
||||||
|
* ui: dashboard: rework resource-load panel to a more detailed status panel,
|
||||||
|
showing, among other things, uptime, Kernel version, CPU info and
|
||||||
|
repository status.
|
||||||
|
|
||||||
|
* ui: adminsitration/dashboard: auto-scale columns count and add
|
||||||
|
browser-local setting to override that to a fixed value of columns.
|
||||||
|
|
||||||
|
* fix #3212: api, ui: add support for notes on backup groups
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Jul 2021 08:07:41 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.2-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* ui: use task list component from widget toolkit
|
||||||
|
|
||||||
|
* api: add keep-job-configs flag to datastore remove endpoint
|
||||||
|
|
||||||
|
* api: config: delete datastore: also remove tape backup jobs
|
||||||
|
|
||||||
|
* ui: tape restore: mark datastore selector as 'not a form field' to fix
|
||||||
|
compatibility with ExtJS 7.0
|
||||||
|
|
||||||
|
* ui: datastore removal: only navigate away when the user actually confirmed
|
||||||
|
the removal of that datastore
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 08 Jul 2021 14:44:12 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.1-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* file restore daemon: log basic startup steps
|
||||||
|
|
||||||
|
* REST-API: set error message extension for bad-request response log to
|
||||||
|
ensure the actual error is logged in any (access) log, making debugging
|
||||||
|
such issues easier
|
||||||
|
|
||||||
|
* restore daemon: create /run/proxmox-backup on startup as there's now some
|
||||||
|
runtime state saved there, which failed all API requests to the restore
|
||||||
|
daemon otherwise
|
||||||
|
|
||||||
|
* restore daemon: use millisecond log resolution
|
||||||
|
|
||||||
|
* fix #3496: acme: plugin: actually sleep after setting the TXT record,
|
||||||
|
ensuring DNS propagation of that record. This makes it catch up with the
|
||||||
|
docs/web-interface, where the option was already available.
|
||||||
|
|
||||||
|
* docs: initial update to repositories for bullseye
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 23:14:49 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.0-2) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* file-restore-daemon/disk: add LVM (thin) support
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Sat, 03 Jul 2021 02:15:16 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (2.0.0-1) bullseye; urgency=medium
|
||||||
|
|
||||||
|
* initial bump for Debian 11 Bullseye / Proxmox Backup Server 2.0
|
||||||
|
|
||||||
|
* ui: datastore list summary: catch and show errors per datastore
|
||||||
|
|
||||||
|
* ui: dashboard: task summary: add a 'close' tool to the header
|
||||||
|
|
||||||
|
* ensure that backups which are currently being restored or backed up to a
|
||||||
|
tape won't get pruned
|
||||||
|
|
||||||
|
* improve error handling when locking a tape drive for a backup job
|
||||||
|
|
||||||
|
* client/pull: log snapshots that are skipped because of creation time being
|
||||||
|
older than last sync time
|
||||||
|
|
||||||
|
* ui: datastore options: add remove button to drop a datastore from the
|
||||||
|
configuration, without removing any actual data
|
||||||
|
|
||||||
|
* ui: tape: drive selector: do not auto select the drive
|
||||||
|
|
||||||
|
* ui: tape: backup job: use correct default value for pbsUserSelector
|
||||||
|
|
||||||
|
* fix #3433: disks: port over Proxmox VE's S.M.A.R.T wearout logic
|
||||||
|
|
||||||
|
* backup: add helpers for async last recently used (LRU) caches for chunk
|
||||||
|
and index reading of backup snapshot
|
||||||
|
|
||||||
|
* fix #3459: manager: add --ignore-verified and --outdated-after parameters
|
||||||
|
|
||||||
|
* proxmox-backup-manager: show task log on datastore create
|
||||||
|
|
||||||
|
* tape: snapshot reader: read chunks sorted by inode (per index) to improve
|
||||||
|
sequential reads when backing up data from slow spinning disks to tape.
|
||||||
|
|
||||||
|
* file-restore: support ZFS pools
|
||||||
|
|
||||||
|
* improve fix for #3393: pxar create: try to read xattrs/fcaps/acls by default
|
||||||
|
|
||||||
|
* fix compatibility with ExtJS 7.0
|
||||||
|
|
||||||
|
* docs: build api-viewer from widget-toolkit-dev
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Mon, 28 Jun 2021 19:35:40 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.9-1) stable; urgency=medium
|
||||||
|
|
||||||
|
* lto/sg_tape/encryption: remove non lto-4 supported byte
|
||||||
|
|
||||||
|
* ui: improve tape restore
|
||||||
|
|
||||||
|
* ui: panel/UsageChart: change downloadServerUrl
|
||||||
|
|
||||||
|
* ui: css fixes and cleanups
|
||||||
|
|
||||||
|
* api2/tape: add api call to list media sets
|
||||||
|
|
||||||
|
* ui: tape/BackupOverview: expand pools by default
|
||||||
|
|
||||||
|
* api: node/journal: fix parameter extraction of /nodes/node/journal
|
||||||
|
|
||||||
|
* file-restore-daemon: limit concurrent download calls
|
||||||
|
|
||||||
|
* file-restore-daemon: watchdog: add inhibit for long downloads
|
||||||
|
|
||||||
|
* file-restore-daemon: work around tokio DuplexStream bug
|
||||||
|
|
||||||
|
* apt: fix removal of non-existant http-proxy config
|
||||||
|
|
||||||
|
* file-restore-daemon: disk: add RawFs bucket type
|
||||||
|
|
||||||
|
* file-restore-daemon: disk: ignore "invalid fs" error
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 01 Jun 2021 08:24:01 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.8-1) stable; urgency=medium
|
||||||
|
|
||||||
|
* api-proxy: implement 'reload-certificate' command and hot-reload proxy
|
||||||
|
certificate when updating via the API
|
||||||
|
|
||||||
|
* ui: add task descriptions for ACME/Let's Encrypt related tasks
|
||||||
|
|
||||||
|
* correctly set apt proxy configuration
|
||||||
|
|
||||||
|
* ui: configuration: support setting a HTTP proxy for APT and subscription
|
||||||
|
checks.
|
||||||
|
|
||||||
|
* ui: tape: add 'Force new Media-Set' checkbox to manual backup
|
||||||
|
|
||||||
|
* ui: datastore/Content: add forget (delete) button for whole backup groups
|
||||||
|
|
||||||
|
* ui: tape: backup overview: move restore buttons inline to action-buttons,
|
||||||
|
making the UX more similar to the datastore content tree-view
|
||||||
|
|
||||||
|
* ui: tape restore: enabling selecting multiple snapshots
|
||||||
|
|
||||||
|
* ui: dashboards statistics: visualize datastores where querying the usage
|
||||||
|
failed
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 21 May 2021 18:21:28 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.7-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* client: use stderr for all fingerprint confirm msgs
|
||||||
|
|
||||||
|
* fix #3391: improve mismatched fingerprint handling
|
||||||
|
|
||||||
|
* tape: add single snapshot restore
|
||||||
|
|
||||||
|
* docs/api-viewer: improve rendering of array format
|
||||||
|
|
||||||
|
* tape/pool_writer: do not unwrap on channel send
|
||||||
|
|
||||||
|
* ui: window/SyncJobEdit: disable autoSelect for remote datastore
|
||||||
|
|
||||||
|
* ui: tape: rename 'Datastore' to 'Target Datastore'
|
||||||
|
|
||||||
|
* manager: acme plugin: auto-complete available DNS challenge types
|
||||||
|
|
||||||
|
* manager: acme plugin: remove ID completion helper from add command
|
||||||
|
|
||||||
|
* completion: ACME plugin type: comment out http type for now, not useful
|
||||||
|
|
||||||
|
* acme: use proxmox-acme-plugins and load schema from there
|
||||||
|
|
||||||
|
* fix 3296: add http_proxy to node config, and provide a cli
|
||||||
|
|
||||||
|
* fix #3331: improve progress for last snapshot in group
|
||||||
|
|
||||||
|
* file-restore: add debug mode with serial access
|
||||||
|
|
||||||
|
* file-restore: support more drives
|
||||||
|
|
||||||
|
* file-restore: add more RAM for VMs with many drives or debug
|
||||||
|
|
||||||
|
* file-restore: try to kill VM when stale
|
||||||
|
|
||||||
|
* make sure URI paths start with a slash
|
||||||
|
|
||||||
|
* tape: use LOCATE(16) SCSI command
|
||||||
|
|
||||||
|
* call create_run_dir() at daemon startup
|
||||||
|
|
||||||
|
* tape/drive: add 'move_to_file' to TapeDriver trait
|
||||||
|
|
||||||
|
* proxmox_restore_daemon: mount ntfs with 'utf8' option
|
||||||
|
|
||||||
|
* client/http_client: add necessary brackets for ipv6
|
||||||
|
|
||||||
|
* docs: tape: clarify LTO-4/5 support
|
||||||
|
|
||||||
|
* tape/restore: optimize chunk restore behaviour
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 11 May 2021 13:22:49 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.6-2) unstable; urgency=medium
|
||||||
|
|
||||||
|
* fix permissions set in create_run_dir
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 04 May 2021 12:25:00 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.6-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tape restore: do not verify restored files
|
||||||
|
|
||||||
|
* tape restore: add restore speed to logs
|
||||||
|
|
||||||
|
* tape restore: write datastore in separate thread
|
||||||
|
|
||||||
|
* add ACME support
|
||||||
|
|
||||||
|
* add node config
|
||||||
|
|
||||||
|
* docs: user-management: add note about untrusted certificates for
|
||||||
|
webauthn
|
||||||
|
|
||||||
|
* bin: use extract_output_format where necessary
|
||||||
|
|
||||||
|
* add ctime and size function to IndexFile trait
|
||||||
|
|
||||||
|
* ui: tape: handle tapes in changers without barcode
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 04 May 2021 12:09:25 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.5-3) stable; urgency=medium
|
||||||
|
|
||||||
|
* file-restore: use 'norecovery' for XFS filesystem to allow mounting
|
||||||
|
those which where not un-mounted during backup
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 29 Apr 2021 15:26:13 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.5-2) stable; urgency=medium
|
||||||
|
|
||||||
|
* file-restore: strip .img.fidx suffix from drive serials to avoid running
|
||||||
|
in the 20 character limit SCSI serial values have.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Apr 2021 11:15:08 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.5-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tools/sgutils2: add size workaround for mode_sense
|
||||||
|
|
||||||
|
* tape: add read_medium_configuration_page() to detect WORM media
|
||||||
|
|
||||||
|
* file-restore: fix package name for kernel/initramfs image
|
||||||
|
|
||||||
|
* tape: remove MediumType struct, which is only valid on IBM drives
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 27 Apr 2021 12:20:04 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.4-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* file-restore: add size to image files and components
|
||||||
|
|
||||||
|
* file-restore: exit with code 1 in case streaming fails
|
||||||
|
|
||||||
|
* file-restore: use less memory for VM (now 128 MiB) and reboot on panic
|
||||||
|
|
||||||
|
* ui: tape: improve reload drive-status logic on user actions
|
||||||
|
|
||||||
|
* tape backup: list the snapshots we could back up on failed backup
|
||||||
|
notification
|
||||||
|
|
||||||
|
* Improve on a scheduling issue when updating the calendar event such, that
|
||||||
|
it would had triggered between the last-run and now. Use the next future
|
||||||
|
event as actual next trigger instead.
|
||||||
|
|
||||||
|
* SCSI mode sense: include the expected and unexpected sizes in the error
|
||||||
|
message, to allow easier debugging
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 27 Apr 2021 08:27:10 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.3-2) unstable; urgency=medium
|
||||||
|
|
||||||
|
* improve check for LTO4 tapes
|
||||||
|
|
||||||
|
* api: node status: return further information about SWAP, IO-wait, CPU info
|
||||||
|
and Kernel version
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 23 Apr 2021 10:52:08 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.3-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tape restore: improve datastore locking when GC runs at the same time
|
||||||
|
|
||||||
|
* tape restore: always do quick chunk verification
|
||||||
|
|
||||||
|
* tape: improve compatibillity with some changers
|
||||||
|
|
||||||
|
* tape: work-around missing format command on LTO-4 drives, fall-back to
|
||||||
|
slower rewind erease
|
||||||
|
|
||||||
|
* fix #3393: pxar: allow and safe the 'security.NTACL' extended attribute
|
||||||
|
|
||||||
|
* file-restore: support encrypted VM backups
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 22 Apr 2021 20:14:58 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.2-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* backup verify: always re-check if we can skip a chunk in the actual verify
|
||||||
|
loop.
|
||||||
|
|
||||||
|
* tape: do not try to backup unfinished backups
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 15 Apr 2021 13:26:52 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.1-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* docs: include tape in table of contents
|
||||||
|
|
||||||
|
* docs: tape: improve definition-list format and add screenshots
|
||||||
|
|
||||||
|
* docs: reorder maintenance and network chapters after client-usage/tools
|
||||||
|
chapters
|
||||||
|
|
||||||
|
* ui: tape changer status: add Format button to drive grid
|
||||||
|
|
||||||
|
* backup/verify: improve speed on disks with slow random-IO (spinners) by
|
||||||
|
iterating over chunks sorted by inode
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 14 Apr 2021 14:50:29 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.1.0-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* enable tape backup as technology preview by default
|
||||||
|
|
||||||
|
* tape: read drive status: clear deferred error or media changed events.
|
||||||
|
|
||||||
|
* tape: improve end-of-tape (EOT) error handling
|
||||||
|
|
||||||
|
* tape: cleanup media catalog on tape reuse
|
||||||
|
|
||||||
|
* zfs: re-use underlying pool wide IO stats for datasets
|
||||||
|
|
||||||
|
* api daemon: only log error from accepting new connections to avoid opening
|
||||||
|
to many file descriptors
|
||||||
|
|
||||||
|
* api/datastore: allow downloading the entire archive as ZIP archive, not
|
||||||
|
only sub-paths
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Tue, 13 Apr 2021 14:42:18 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.14-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* server: compress API call response and static files if client accepts that
|
||||||
|
|
||||||
|
* compress generated ZIP archives with deflate
|
||||||
|
|
||||||
|
* tape: implement LTO userspace driver
|
||||||
|
|
||||||
|
* docs: mention new user space tape driver, adopt device path names
|
||||||
|
|
||||||
|
* tape: always clear encryption key after backup (for security reasons)
|
||||||
|
|
||||||
|
* ui: improve changer status view
|
||||||
|
|
||||||
|
* add proxmox-file-restore package, providing a central file-restore binary
|
||||||
|
with preparations for restoring files also from block level backups using
|
||||||
|
QEMU for a safe encapsulation.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 08 Apr 2021 16:35:11 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.13-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* pxar: improve handling ACL entries on create and restore
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Apr 2021 15:32:01 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (1.0.12-1) unstable; urgency=medium
|
||||||
|
|
||||||
|
* tape: write catalogs to tape (speedup catalog restore)
|
||||||
|
|
||||||
|
* tape: add --scan option for catalog restore
|
||||||
|
|
||||||
|
* tape: improve locking (lock media-sets)
|
||||||
|
|
||||||
|
* tape: ui: enable datastore mappings
|
||||||
|
|
||||||
|
* fix #3359: fix blocking writes in async code during pxar create
|
||||||
|
|
||||||
|
* api2/tape/backup: wait indefinitely for lock in scheduled backup jobs
|
||||||
|
|
||||||
|
* docu improvements
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Fri, 26 Mar 2021 14:08:47 +0100
|
||||||
|
|
||||||
rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
|
rust-proxmox-backup (1.0.11-1) unstable; urgency=medium
|
||||||
|
|
||||||
* fix feature flag logic in pxar create
|
* fix feature flag logic in pxar create
|
||||||
|
86
debian/control
vendored
@ -1,8 +1,8 @@
|
|||||||
Source: rust-proxmox-backup
|
Source: rust-proxmox-backup
|
||||||
Section: admin
|
Section: admin
|
||||||
Priority: optional
|
Priority: optional
|
||||||
Build-Depends: debhelper (>= 11),
|
Build-Depends: debhelper (>= 12),
|
||||||
dh-cargo (>= 18),
|
dh-cargo (>= 24),
|
||||||
cargo:native,
|
cargo:native,
|
||||||
rustc:native,
|
rustc:native,
|
||||||
libstd-rust-dev,
|
libstd-rust-dev,
|
||||||
@ -15,6 +15,9 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-crossbeam-channel-0.5+default-dev,
|
librust-crossbeam-channel-0.5+default-dev,
|
||||||
librust-endian-trait-0.6+arrays-dev,
|
librust-endian-trait-0.6+arrays-dev,
|
||||||
librust-endian-trait-0.6+default-dev,
|
librust-endian-trait-0.6+default-dev,
|
||||||
|
librust-env-logger-0.7+default-dev,
|
||||||
|
librust-flate2-1+default-dev,
|
||||||
|
librust-foreign-types-0.3+default-dev,
|
||||||
librust-futures-0.3+default-dev,
|
librust-futures-0.3+default-dev,
|
||||||
librust-h2-0.3+default-dev,
|
librust-h2-0.3+default-dev,
|
||||||
librust-h2-0.3+stream-dev,
|
librust-h2-0.3+stream-dev,
|
||||||
@ -34,15 +37,23 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-pam-sys-0.5+default-dev,
|
librust-pam-sys-0.5+default-dev,
|
||||||
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
|
||||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||||
librust-pin-project-1+default-dev,
|
librust-pin-project-lite-0.2+default-dev,
|
||||||
librust-pin-utils-0.1+default-dev,
|
librust-proxmox-0.13+api-macro-dev,
|
||||||
librust-proxmox-0.11+api-macro-dev,
|
librust-proxmox-0.13+cli-dev,
|
||||||
librust-proxmox-0.11+default-dev,
|
librust-proxmox-0.13+default-dev,
|
||||||
librust-proxmox-0.11+sortable-macro-dev,
|
librust-proxmox-0.13+router-dev,
|
||||||
librust-proxmox-0.11+websocket-dev,
|
librust-proxmox-0.13+sortable-macro-dev,
|
||||||
|
librust-proxmox-0.13+tfa-dev,
|
||||||
|
librust-proxmox-acme-rs-0.2+default-dev (>= 0.2.1-~~),
|
||||||
|
librust-proxmox-apt-0.7+default-dev,
|
||||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
librust-proxmox-fuse-0.1+default-dev (>= 0.1.1-~~),
|
||||||
librust-pxar-0.10+default-dev,
|
librust-proxmox-http-0.4+client-dev,
|
||||||
librust-pxar-0.10+tokio-io-dev,
|
librust-proxmox-http-0.4+default-dev ,
|
||||||
|
librust-proxmox-http-0.4+http-helpers-dev,
|
||||||
|
librust-proxmox-http-0.4+websocket-dev,
|
||||||
|
librust-proxmox-openid-0.7+default-dev,
|
||||||
|
librust-pxar-0.10+default-dev (>= 0.10.1-~~),
|
||||||
|
librust-pxar-0.10+tokio-io-dev (>= 0.10.1-~~),
|
||||||
librust-regex-1+default-dev (>= 1.2-~~),
|
librust-regex-1+default-dev (>= 1.2-~~),
|
||||||
librust-rustyline-7+default-dev,
|
librust-rustyline-7+default-dev,
|
||||||
librust-serde-1+default-dev,
|
librust-serde-1+default-dev,
|
||||||
@ -50,29 +61,32 @@ Build-Depends: debhelper (>= 11),
|
|||||||
librust-serde-json-1+default-dev,
|
librust-serde-json-1+default-dev,
|
||||||
librust-siphasher-0.3+default-dev,
|
librust-siphasher-0.3+default-dev,
|
||||||
librust-syslog-4+default-dev,
|
librust-syslog-4+default-dev,
|
||||||
librust-tokio-1+default-dev,
|
librust-thiserror-1+default-dev,
|
||||||
librust-tokio-1+fs-dev,
|
librust-tokio-1+default-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+io-util-dev,
|
librust-tokio-1+fs-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+macros-dev,
|
librust-tokio-1+io-std-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+net-dev,
|
librust-tokio-1+io-util-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+parking-lot-dev,
|
librust-tokio-1+macros-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+process-dev,
|
librust-tokio-1+net-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+rt-dev,
|
librust-tokio-1+parking-lot-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+rt-multi-thread-dev,
|
librust-tokio-1+process-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+signal-dev,
|
librust-tokio-1+rt-dev (>= 1.6-~~),
|
||||||
librust-tokio-1+time-dev,
|
librust-tokio-1+rt-multi-thread-dev (>= 1.6-~~),
|
||||||
|
librust-tokio-1+signal-dev (>= 1.6-~~),
|
||||||
|
librust-tokio-1+time-dev (>= 1.6-~~),
|
||||||
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
librust-tokio-openssl-0.6+default-dev (>= 0.6.1-~~),
|
||||||
librust-tokio-stream-0.1+default-dev,
|
librust-tokio-stream-0.1+default-dev,
|
||||||
librust-tokio-util-0.6+codec-dev,
|
librust-tokio-util-0.6+codec-dev,
|
||||||
librust-tokio-util-0.6+default-dev,
|
librust-tokio-util-0.6+default-dev,
|
||||||
|
librust-tokio-util-0.6+io-dev,
|
||||||
librust-tower-service-0.3+default-dev,
|
librust-tower-service-0.3+default-dev,
|
||||||
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
librust-udev-0.4+default-dev | librust-udev-0.3+default-dev,
|
||||||
librust-url-2+default-dev (>= 2.1-~~),
|
librust-url-2+default-dev (>= 2.1-~~),
|
||||||
librust-walkdir-2+default-dev,
|
librust-walkdir-2+default-dev,
|
||||||
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
librust-webauthn-rs-0.2+default-dev (>= 0.2.5-~~),
|
||||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||||
librust-zstd-0.4+bindgen-dev,
|
librust-zstd-0.6+bindgen-dev,
|
||||||
librust-zstd-0.4+default-dev,
|
librust-zstd-0.6+default-dev,
|
||||||
libacl1-dev,
|
libacl1-dev,
|
||||||
libfuse3-dev,
|
libfuse3-dev,
|
||||||
libsystemd-dev,
|
libsystemd-dev,
|
||||||
@ -86,6 +100,7 @@ Build-Depends: debhelper (>= 11),
|
|||||||
graphviz <!nodoc>,
|
graphviz <!nodoc>,
|
||||||
latexmk <!nodoc>,
|
latexmk <!nodoc>,
|
||||||
patchelf,
|
patchelf,
|
||||||
|
proxmox-widget-toolkit-dev <!nodoc>,
|
||||||
pve-eslint (>= 7.18.0-1),
|
pve-eslint (>= 7.18.0-1),
|
||||||
python3-docutils,
|
python3-docutils,
|
||||||
python3-pygments,
|
python3-pygments,
|
||||||
@ -96,27 +111,27 @@ Build-Depends: debhelper (>= 11),
|
|||||||
texlive-xetex <!nodoc>,
|
texlive-xetex <!nodoc>,
|
||||||
xindy <!nodoc>
|
xindy <!nodoc>
|
||||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||||
Standards-Version: 4.4.1
|
Standards-Version: 4.5.1
|
||||||
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
|
Vcs-Git: git://git.proxmox.com/git/proxmox-backup.git
|
||||||
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
|
Vcs-Browser: https://git.proxmox.com/?p=proxmox-backup.git;a=summary
|
||||||
Homepage: https://www.proxmox.com
|
Homepage: https://www.proxmox.com
|
||||||
|
Rules-Requires-Root: binary-targets
|
||||||
|
|
||||||
Package: proxmox-backup-server
|
Package: proxmox-backup-server
|
||||||
Architecture: any
|
Architecture: any
|
||||||
Depends: fonts-font-awesome,
|
Depends: fonts-font-awesome,
|
||||||
libjs-extjs (>= 6.0.1),
|
libjs-extjs (>= 7~),
|
||||||
libjs-qrcodejs (>= 1.20201119),
|
libjs-qrcodejs (>= 1.20201119),
|
||||||
|
libproxmox-acme-plugins,
|
||||||
libsgutils2-2,
|
libsgutils2-2,
|
||||||
libzstd1 (>= 1.3.8),
|
libzstd1 (>= 1.3.8),
|
||||||
lvm2,
|
lvm2,
|
||||||
mt-st,
|
|
||||||
mtx,
|
|
||||||
openssh-server,
|
openssh-server,
|
||||||
pbs-i18n,
|
pbs-i18n,
|
||||||
postfix | mail-transport-agent,
|
postfix | mail-transport-agent,
|
||||||
proxmox-backup-docs,
|
proxmox-backup-docs,
|
||||||
proxmox-mini-journalreader,
|
proxmox-mini-journalreader,
|
||||||
proxmox-widget-toolkit (>= 2.3-6),
|
proxmox-widget-toolkit (>= 3.3-2),
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
pve-xtermjs (>= 4.7.0-1),
|
||||||
sg3-utils,
|
sg3-utils,
|
||||||
smartmontools,
|
smartmontools,
|
||||||
@ -140,9 +155,22 @@ Description: Proxmox Backup Client tools
|
|||||||
Package: proxmox-backup-docs
|
Package: proxmox-backup-docs
|
||||||
Build-Profiles: <!nodoc>
|
Build-Profiles: <!nodoc>
|
||||||
Section: doc
|
Section: doc
|
||||||
Depends: libjs-extjs,
|
Depends: fonts-font-awesome,
|
||||||
|
libjs-extjs,
|
||||||
libjs-mathjax,
|
libjs-mathjax,
|
||||||
${misc:Depends},
|
${misc:Depends},
|
||||||
Architecture: all
|
Architecture: all
|
||||||
Description: Proxmox Backup Documentation
|
Description: Proxmox Backup Documentation
|
||||||
This package contains the Proxmox Backup Documentation files.
|
This package contains the Proxmox Backup Documentation files.
|
||||||
|
|
||||||
|
Package: proxmox-backup-file-restore
|
||||||
|
Architecture: any
|
||||||
|
Depends: ${misc:Depends},
|
||||||
|
${shlibs:Depends},
|
||||||
|
Recommends: pve-qemu-kvm (>= 5.0.0-9),
|
||||||
|
proxmox-backup-restore-image,
|
||||||
|
Breaks: proxmox-backup-restore-image (<< 0.3.1)
|
||||||
|
Description: Proxmox Backup single file restore tools for pxar and block device backups
|
||||||
|
This package contains the Proxmox Backup single file restore client for
|
||||||
|
restoring individual files and folders from both host/container and VM/block
|
||||||
|
device backups. It includes a block device restore driver using QEMU.
|
||||||
|
45
debian/control.in
vendored
@ -1,45 +0,0 @@
|
|||||||
Package: proxmox-backup-server
|
|
||||||
Architecture: any
|
|
||||||
Depends: fonts-font-awesome,
|
|
||||||
libjs-extjs (>= 6.0.1),
|
|
||||||
libjs-qrcodejs (>= 1.20201119),
|
|
||||||
libsgutils2-2,
|
|
||||||
libzstd1 (>= 1.3.8),
|
|
||||||
lvm2,
|
|
||||||
mt-st,
|
|
||||||
mtx,
|
|
||||||
openssh-server,
|
|
||||||
pbs-i18n,
|
|
||||||
postfix | mail-transport-agent,
|
|
||||||
proxmox-backup-docs,
|
|
||||||
proxmox-mini-journalreader,
|
|
||||||
proxmox-widget-toolkit (>= 2.3-6),
|
|
||||||
pve-xtermjs (>= 4.7.0-1),
|
|
||||||
sg3-utils,
|
|
||||||
smartmontools,
|
|
||||||
${misc:Depends},
|
|
||||||
${shlibs:Depends},
|
|
||||||
Recommends: zfsutils-linux,
|
|
||||||
ifupdown2,
|
|
||||||
Description: Proxmox Backup Server daemon with tools and GUI
|
|
||||||
This package contains the Proxmox Backup Server daemons and related
|
|
||||||
tools. This includes a web-based graphical user interface.
|
|
||||||
|
|
||||||
Package: proxmox-backup-client
|
|
||||||
Architecture: any
|
|
||||||
Depends: qrencode,
|
|
||||||
${misc:Depends},
|
|
||||||
${shlibs:Depends},
|
|
||||||
Description: Proxmox Backup Client tools
|
|
||||||
This package contains the Proxmox Backup client, which provides a
|
|
||||||
simple command line tool to create and restore backups.
|
|
||||||
|
|
||||||
Package: proxmox-backup-docs
|
|
||||||
Build-Profiles: <!nodoc>
|
|
||||||
Section: doc
|
|
||||||
Depends: libjs-extjs,
|
|
||||||
libjs-mathjax,
|
|
||||||
${misc:Depends},
|
|
||||||
Architecture: all
|
|
||||||
Description: Proxmox Backup Documentation
|
|
||||||
This package contains the Proxmox Backup Documentation files.
|
|
42
debian/debcargo.toml
vendored
@ -1,42 +0,0 @@
|
|||||||
overlay = "."
|
|
||||||
crate_src_path = ".."
|
|
||||||
whitelist = ["tests/*.c"]
|
|
||||||
|
|
||||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
|
||||||
|
|
||||||
[source]
|
|
||||||
vcs_git = "git://git.proxmox.com/git/proxmox-backup.git"
|
|
||||||
vcs_browser = "https://git.proxmox.com/?p=proxmox-backup.git;a=summary"
|
|
||||||
section = "admin"
|
|
||||||
build_depends = [
|
|
||||||
"bash-completion",
|
|
||||||
"debhelper (>= 12~)",
|
|
||||||
"fonts-dejavu-core <!nodoc>",
|
|
||||||
"fonts-lato <!nodoc>",
|
|
||||||
"fonts-open-sans <!nodoc>",
|
|
||||||
"graphviz <!nodoc>",
|
|
||||||
"latexmk <!nodoc>",
|
|
||||||
"patchelf",
|
|
||||||
"pve-eslint (>= 7.18.0-1)",
|
|
||||||
"python3-docutils",
|
|
||||||
"python3-pygments",
|
|
||||||
"python3-sphinx <!nodoc>",
|
|
||||||
"rsync",
|
|
||||||
"texlive-fonts-extra <!nodoc>",
|
|
||||||
"texlive-fonts-recommended <!nodoc>",
|
|
||||||
"texlive-xetex <!nodoc>",
|
|
||||||
"xindy <!nodoc>",
|
|
||||||
]
|
|
||||||
|
|
||||||
build_depends_excludes = [
|
|
||||||
"debhelper (>=11)",
|
|
||||||
]
|
|
||||||
|
|
||||||
[packages.lib]
|
|
||||||
depends = [
|
|
||||||
"libacl1-dev",
|
|
||||||
"libfuse3-dev",
|
|
||||||
"libsystemd-dev",
|
|
||||||
"uuid-dev",
|
|
||||||
"libsgutils2-dev",
|
|
||||||
]
|
|
26
debian/postinst
vendored
@ -26,33 +26,7 @@ case "$1" in
|
|||||||
fi
|
fi
|
||||||
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
deb-systemd-invoke $_dh_action proxmox-backup.service proxmox-backup-proxy.service >/dev/null || true
|
||||||
|
|
||||||
# FIXME: Remove with 1.1
|
|
||||||
if test -n "$2"; then
|
if test -n "$2"; then
|
||||||
if dpkg --compare-versions "$2" 'lt' '0.9.4-1'; then
|
|
||||||
if grep -s -q -P -e '^\s+verify-schedule ' /etc/proxmox-backup/datastore.cfg; then
|
|
||||||
echo "NOTE: drop all verify schedules from datastore config."
|
|
||||||
echo "You can now add more flexible verify jobs"
|
|
||||||
flock -w 30 /etc/proxmox-backup/.datastore.lck \
|
|
||||||
sed -i '/^\s\+verify-schedule /d' /etc/proxmox-backup/datastore.cfg || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
if dpkg --compare-versions "$2" 'le' '0.9.5-1'; then
|
|
||||||
chown --quiet backup:backup /var/log/proxmox-backup/api/auth.log || true
|
|
||||||
fi
|
|
||||||
if dpkg --compare-versions "$2" 'le' '0.9.7-1'; then
|
|
||||||
if [ -e /etc/proxmox-backup/remote.cfg ]; then
|
|
||||||
echo "NOTE: Switching over remote.cfg to new field names.."
|
|
||||||
flock -w 30 /etc/proxmox-backup/.remote.lck \
|
|
||||||
sed -i \
|
|
||||||
-e 's/^\s\+userid /\tauth-id /g' \
|
|
||||||
/etc/proxmox-backup/remote.cfg || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
# FIXME: remove with 2.0
|
|
||||||
if [ -d "/var/lib/proxmox-backup/tape" ] &&
|
|
||||||
[ "$(stat --printf '%a' '/var/lib/proxmox-backup/tape')" != "750" ]; then
|
|
||||||
chmod 0750 /var/lib/proxmox-backup/tape || true
|
|
||||||
fi
|
|
||||||
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
# FIXME: Remove in future version once we're sure no broken entries remain in anyone's files
|
||||||
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
if grep -q -e ':termproxy::[^@]\+: ' /var/log/proxmox-backup/tasks/active; then
|
||||||
echo "Fixing up termproxy user id in task log..."
|
echo "Fixing up termproxy user id in task log..."
|
||||||
|
8
debian/proxmox-backup-debug.bc
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# proxmox-backup-debug bash completion
|
||||||
|
|
||||||
|
# see http://tiswww.case.edu/php/chet/bash/FAQ
|
||||||
|
# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
|
||||||
|
# this modifies global var, but I found no better way
|
||||||
|
COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
|
||||||
|
|
||||||
|
complete -C 'proxmox-backup-debug bashcomplete' proxmox-backup-debug
|
1
debian/proxmox-backup-docs.links
vendored
@ -1,5 +1,6 @@
|
|||||||
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
|
||||||
|
/usr/share/fonts-font-awesome/ /usr/share/doc/proxmox-backup/html/lto-barcode/font-awesome
|
||||||
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
|
||||||
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax
|
||||||
|
1
debian/proxmox-backup-file-restore.bash-completion
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
debian/proxmox-file-restore.bc proxmox-file-restore
|
8
debian/proxmox-backup-file-restore.bc
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# proxmox-file-restore bash completion
|
||||||
|
|
||||||
|
# see http://tiswww.case.edu/php/chet/bash/FAQ
|
||||||
|
# and __ltrim_colon_completions() in /usr/share/bash-completion/bash_completion
|
||||||
|
# this modifies global var, but I found no better way
|
||||||
|
COMP_WORDBREAKS=${COMP_WORDBREAKS//:}
|
||||||
|
|
||||||
|
complete -C 'proxmox-file-restore bashcomplete' proxmox-file-restore
|
4
debian/proxmox-backup-file-restore.install
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
usr/bin/proxmox-file-restore
|
||||||
|
usr/share/man/man1/proxmox-file-restore.1
|
||||||
|
usr/share/zsh/vendor-completions/_proxmox-file-restore
|
||||||
|
usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
|
74
debian/proxmox-backup-file-restore.postinst
vendored
Executable file
@ -0,0 +1,74 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
update_initramfs() {
|
||||||
|
# regenerate initramfs for single file restore VM
|
||||||
|
INST_PATH="/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore"
|
||||||
|
CACHE_PATH="/var/cache/proxmox-backup/file-restore-initramfs.img"
|
||||||
|
CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
|
||||||
|
|
||||||
|
# cleanup first, in case proxmox-file-restore was uninstalled since we do
|
||||||
|
# not want an unuseable image lying around
|
||||||
|
rm -f "$CACHE_PATH"
|
||||||
|
|
||||||
|
if [ ! -f "$INST_PATH/initramfs.img" ]; then
|
||||||
|
echo "proxmox-backup-restore-image is not installed correctly, skipping update" >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Updating file-restore initramfs..."
|
||||||
|
|
||||||
|
# avoid leftover temp file
|
||||||
|
cleanup() {
|
||||||
|
rm -f "$CACHE_PATH.tmp" "$CACHE_PATH_DBG.tmp"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
mkdir -p "/var/cache/proxmox-backup"
|
||||||
|
cp "$INST_PATH/initramfs.img" "$CACHE_PATH.tmp"
|
||||||
|
|
||||||
|
# cpio uses passed in path as offset inside the archive as well, so we need
|
||||||
|
# to be in the same dir as the daemon binary to ensure it's placed in /
|
||||||
|
( cd "$INST_PATH"; \
|
||||||
|
printf "./proxmox-restore-daemon" \
|
||||||
|
| cpio -o --format=newc -A -F "$CACHE_PATH.tmp" )
|
||||||
|
mv -f "$CACHE_PATH.tmp" "$CACHE_PATH"
|
||||||
|
|
||||||
|
if [ -f "$INST_PATH/initramfs-debug.img" ]; then
|
||||||
|
echo "Updating file-restore debug initramfs..."
|
||||||
|
cp "$INST_PATH/initramfs-debug.img" "$CACHE_PATH_DBG.tmp"
|
||||||
|
( cd "$INST_PATH"; \
|
||||||
|
printf "./proxmox-restore-daemon" \
|
||||||
|
| cpio -o --format=newc -A -F "$CACHE_PATH_DBG.tmp" )
|
||||||
|
mv -f "$CACHE_PATH_DBG.tmp" "$CACHE_PATH_DBG"
|
||||||
|
fi
|
||||||
|
|
||||||
|
trap - EXIT
|
||||||
|
}
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
configure)
|
||||||
|
# in case restore daemon was updated
|
||||||
|
update_initramfs
|
||||||
|
;;
|
||||||
|
|
||||||
|
triggered)
|
||||||
|
if [ "$2" = "proxmox-backup-restore-image-update" ]; then
|
||||||
|
# in case base-image was updated
|
||||||
|
update_initramfs
|
||||||
|
else
|
||||||
|
echo "postinst called with unknown trigger name: \`$2'" >&2
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
|
||||||
|
abort-upgrade|abort-remove|abort-deconfigure)
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "postinst called with unknown argument \`$1'" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
exit 0
|
1
debian/proxmox-backup-file-restore.triggers
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
interest-noawait proxmox-backup-restore-image-update
|
1
debian/proxmox-backup-server.bash-completion
vendored
@ -1,4 +1,5 @@
|
|||||||
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
debian/proxmox-backup-manager.bc proxmox-backup-manager
|
||||||
|
debian/proxmox-backup-debug.bc proxmox-backup-debug
|
||||||
debian/proxmox-tape.bc proxmox-tape
|
debian/proxmox-tape.bc proxmox-tape
|
||||||
debian/pmtx.bc pmtx
|
debian/pmtx.bc pmtx
|
||||||
debian/pmt.bc pmt
|
debian/pmt.bc pmt
|
||||||
|
3
debian/proxmox-backup-server.install
vendored
@ -9,6 +9,7 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
|
|||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
||||||
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||||
|
usr/sbin/proxmox-backup-debug
|
||||||
usr/sbin/proxmox-backup-manager
|
usr/sbin/proxmox-backup-manager
|
||||||
usr/bin/pmtx
|
usr/bin/pmtx
|
||||||
usr/bin/pmt
|
usr/bin/pmt
|
||||||
@ -17,6 +18,7 @@ usr/share/javascript/proxmox-backup/index.hbs
|
|||||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||||
usr/share/javascript/proxmox-backup/images
|
usr/share/javascript/proxmox-backup/images
|
||||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||||
|
usr/share/man/man1/proxmox-backup-debug.1
|
||||||
usr/share/man/man1/proxmox-backup-manager.1
|
usr/share/man/man1/proxmox-backup-manager.1
|
||||||
usr/share/man/man1/proxmox-backup-proxy.1
|
usr/share/man/man1/proxmox-backup-proxy.1
|
||||||
usr/share/man/man1/proxmox-tape.1
|
usr/share/man/man1/proxmox-tape.1
|
||||||
@ -31,6 +33,7 @@ usr/share/man/man5/verification.cfg.5
|
|||||||
usr/share/man/man5/media-pool.cfg.5
|
usr/share/man/man5/media-pool.cfg.5
|
||||||
usr/share/man/man5/tape.cfg.5
|
usr/share/man/man5/tape.cfg.5
|
||||||
usr/share/man/man5/tape-job.cfg.5
|
usr/share/man/man5/tape-job.cfg.5
|
||||||
|
usr/share/zsh/vendor-completions/_proxmox-backup-debug
|
||||||
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
usr/share/zsh/vendor-completions/_proxmox-backup-manager
|
||||||
usr/share/zsh/vendor-completions/_proxmox-tape
|
usr/share/zsh/vendor-completions/_proxmox-tape
|
||||||
usr/share/zsh/vendor-completions/_pmtx
|
usr/share/zsh/vendor-completions/_pmtx
|
||||||
|
18
debian/proxmox-backup-server.udev
vendored
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
# do not edit this file, it will be overwritten on update
|
||||||
|
|
||||||
|
# persistent storage links: /dev/tape/{by-id,by-path}
|
||||||
|
|
||||||
|
ACTION=="remove", GOTO="persistent_storage_tape_end"
|
||||||
|
ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_tape_end"
|
||||||
|
|
||||||
|
# also see: /lib/udev/rules.d/60-persistent-storage-tape.rules
|
||||||
|
|
||||||
|
SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", IMPORT{program}="scsi_id --sg-version=3 --export --whitelisted -d $devnode", \
|
||||||
|
SYMLINK+="tape/by-id/scsi-$env{ID_SERIAL}-sg"
|
||||||
|
|
||||||
|
# iSCSI devices from the same host have all the same ID_SERIAL,
|
||||||
|
# but additionally a property named ID_SCSI_SERIAL.
|
||||||
|
SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SERIAL}=="?*", \
|
||||||
|
SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
|
||||||
|
|
||||||
|
LABEL="persistent_storage_tape_end"
|
15
debian/rules
vendored
@ -32,6 +32,9 @@ override_dh_auto_build:
|
|||||||
override_dh_missing:
|
override_dh_missing:
|
||||||
dh_missing --fail-missing
|
dh_missing --fail-missing
|
||||||
|
|
||||||
|
override_dh_auto_test:
|
||||||
|
# ignore here to avoid rebuilding the binaries with the wrong target
|
||||||
|
|
||||||
override_dh_auto_install:
|
override_dh_auto_install:
|
||||||
dh_auto_install -- \
|
dh_auto_install -- \
|
||||||
PROXY_USER=backup \
|
PROXY_USER=backup \
|
||||||
@ -45,15 +48,13 @@ override_dh_installsystemd:
|
|||||||
override_dh_fixperms:
|
override_dh_fixperms:
|
||||||
dh_fixperms --exclude sg-tape-cmd
|
dh_fixperms --exclude sg-tape-cmd
|
||||||
|
|
||||||
# workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
|
|
||||||
# TODO: remove once available (Debian 11 ?)
|
|
||||||
override_dh_dwz:
|
|
||||||
dh_dwz --no-dwz-multifile
|
|
||||||
|
|
||||||
override_dh_strip:
|
override_dh_strip:
|
||||||
dh_strip
|
dh_strip
|
||||||
for exe in $$(find debian/proxmox-backup-client/usr \
|
for exe in $$(find \
|
||||||
debian/proxmox-backup-server/usr -executable -type f); do \
|
debian/proxmox-backup-client/usr \
|
||||||
|
debian/proxmox-backup-server/usr \
|
||||||
|
debian/proxmox-backup-file-restore \
|
||||||
|
-executable -type f); do \
|
||||||
debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
|
debian/scripts/elf-strip-unused-dependencies.sh "$$exe" || true; \
|
||||||
done
|
done
|
||||||
|
|
||||||
|
@ -5,6 +5,8 @@ GENERATED_SYNOPSIS := \
|
|||||||
proxmox-backup-client/synopsis.rst \
|
proxmox-backup-client/synopsis.rst \
|
||||||
proxmox-backup-client/catalog-shell-synopsis.rst \
|
proxmox-backup-client/catalog-shell-synopsis.rst \
|
||||||
proxmox-backup-manager/synopsis.rst \
|
proxmox-backup-manager/synopsis.rst \
|
||||||
|
proxmox-backup-debug/synopsis.rst \
|
||||||
|
proxmox-file-restore/synopsis.rst \
|
||||||
pxar/synopsis.rst \
|
pxar/synopsis.rst \
|
||||||
pmtx/synopsis.rst \
|
pmtx/synopsis.rst \
|
||||||
pmt/synopsis.rst \
|
pmt/synopsis.rst \
|
||||||
@ -25,7 +27,9 @@ MAN1_PAGES := \
|
|||||||
proxmox-tape.1 \
|
proxmox-tape.1 \
|
||||||
proxmox-backup-proxy.1 \
|
proxmox-backup-proxy.1 \
|
||||||
proxmox-backup-client.1 \
|
proxmox-backup-client.1 \
|
||||||
proxmox-backup-manager.1
|
proxmox-backup-manager.1 \
|
||||||
|
proxmox-file-restore.1 \
|
||||||
|
proxmox-backup-debug.1
|
||||||
|
|
||||||
MAN5_PAGES := \
|
MAN5_PAGES := \
|
||||||
media-pool.cfg.5 \
|
media-pool.cfg.5 \
|
||||||
@ -44,23 +48,35 @@ PRUNE_SIMULATOR_FILES := \
|
|||||||
prune-simulator/clear-trigger.png \
|
prune-simulator/clear-trigger.png \
|
||||||
prune-simulator/prune-simulator.js
|
prune-simulator/prune-simulator.js
|
||||||
|
|
||||||
|
PRUNE_SIMULATOR_JS_SOURCE := \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
|
||||||
|
prune-simulator/prune-simulator_source.js
|
||||||
|
|
||||||
|
LTO_BARCODE_JS_SOURCE := \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
|
||||||
|
lto-barcode/code39.js \
|
||||||
|
lto-barcode/prefix-field.js \
|
||||||
|
lto-barcode/label-style.js \
|
||||||
|
lto-barcode/tape-type.js \
|
||||||
|
lto-barcode/paper-size.js \
|
||||||
|
lto-barcode/page-layout.js \
|
||||||
|
lto-barcode/page-calibration.js \
|
||||||
|
lto-barcode/label-list.js \
|
||||||
|
lto-barcode/label-setup.js \
|
||||||
|
lto-barcode/lto-barcode.js
|
||||||
|
|
||||||
LTO_BARCODE_FILES := \
|
LTO_BARCODE_FILES := \
|
||||||
lto-barcode/index.html \
|
lto-barcode/index.html \
|
||||||
lto-barcode/code39.js \
|
lto-barcode/lto-barcode-generator.js
|
||||||
lto-barcode/prefix-field.js \
|
|
||||||
lto-barcode/label-style.js \
|
|
||||||
lto-barcode/tape-type.js \
|
|
||||||
lto-barcode/paper-size.js \
|
|
||||||
lto-barcode/page-layout.js \
|
|
||||||
lto-barcode/page-calibration.js \
|
|
||||||
lto-barcode/label-list.js \
|
|
||||||
lto-barcode/label-setup.js \
|
|
||||||
lto-barcode/lto-barcode.js
|
|
||||||
|
|
||||||
API_VIEWER_SOURCES= \
|
API_VIEWER_SOURCES= \
|
||||||
api-viewer/index.html \
|
api-viewer/index.html \
|
||||||
api-viewer/apidoc.js
|
api-viewer/apidoc.js
|
||||||
|
|
||||||
|
API_VIEWER_FILES := \
|
||||||
|
api-viewer/apidata.js \
|
||||||
|
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
|
||||||
|
|
||||||
# Sphinx documentation setup
|
# Sphinx documentation setup
|
||||||
SPHINXOPTS =
|
SPHINXOPTS =
|
||||||
SPHINXBUILD = sphinx-build
|
SPHINXBUILD = sphinx-build
|
||||||
@ -179,17 +195,38 @@ proxmox-backup-manager.1: proxmox-backup-manager/man1.rst proxmox-backup-manage
|
|||||||
proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
|
proxmox-backup-proxy.1: proxmox-backup-proxy/man1.rst proxmox-backup-proxy/description.rst
|
||||||
rst2man $< >$@
|
rst2man $< >$@
|
||||||
|
|
||||||
|
proxmox-file-restore/synopsis.rst: ${COMPILEDIR}/proxmox-file-restore
|
||||||
|
${COMPILEDIR}/proxmox-file-restore printdoc > proxmox-file-restore/synopsis.rst
|
||||||
|
|
||||||
|
proxmox-file-restore.1: proxmox-file-restore/man1.rst proxmox-file-restore/description.rst proxmox-file-restore/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
|
proxmox-backup-debug/synopsis.rst: ${COMPILEDIR}/proxmox-backup-debug
|
||||||
|
${COMPILEDIR}/proxmox-backup-debug printdoc > proxmox-backup-debug/synopsis.rst
|
||||||
|
|
||||||
|
proxmox-backup-debug.1: proxmox-backup-debug/man1.rst proxmox-backup-debug/description.rst proxmox-backup-debug/synopsis.rst
|
||||||
|
rst2man $< >$@
|
||||||
|
|
||||||
.PHONY: onlinehelpinfo
|
.PHONY: onlinehelpinfo
|
||||||
onlinehelpinfo:
|
onlinehelpinfo:
|
||||||
@echo "Generating OnlineHelpInfo.js..."
|
@echo "Generating OnlineHelpInfo.js..."
|
||||||
$(SPHINXBUILD) -b proxmox-scanrefs $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
$(SPHINXBUILD) -b proxmox-scanrefs -Q $(ALLSPHINXOPTS) $(BUILDDIR)/scanrefs
|
||||||
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
@echo "Build finished. OnlineHelpInfo.js is in $(BUILDDIR)/scanrefs."
|
||||||
|
|
||||||
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
api-viewer/apidata.js: ${COMPILEDIR}/docgen
|
||||||
${COMPILEDIR}/docgen apidata.js >$@
|
${COMPILEDIR}/docgen apidata.js >$@
|
||||||
|
|
||||||
api-viewer/apidoc.js: api-viewer/apidata.js api-viewer/PBSAPI.js
|
api-viewer/apidoc.js: ${API_VIEWER_FILES}
|
||||||
cat api-viewer/apidata.js api-viewer/PBSAPI.js >$@
|
cat ${API_VIEWER_FILES} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
prune-simulator/prune-simulator.js: ${PRUNE_SIMULATOR_JS_SOURCE}
|
||||||
|
cat ${PRUNE_SIMULATOR_JS_SOURCE} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
|
||||||
|
cat ${LTO_BARCODE_JS_SOURCE} >$@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
.PHONY: html
|
.PHONY: html
|
||||||
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
|
||||||
@ -220,6 +257,7 @@ epub3: ${GENERATED_SYNOPSIS}
|
|||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
rm -r -f *~ *.1 ${BUILDDIR} ${GENERATED_SYNOPSIS} api-viewer/apidata.js
|
||||||
|
rm -f api-viewer/apidoc.js lto-barcode/lto-barcode-generator.js prune-simulator/prune-simulator.js
|
||||||
|
|
||||||
|
|
||||||
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
install_manual_pages: ${MAN1_PAGES} ${MAN5_PAGES}
|
||||||
|
@ -1,511 +0,0 @@
|
|||||||
// avoid errors when running without development tools
|
|
||||||
if (!Ext.isDefined(Ext.global.console)) {
|
|
||||||
var console = {
|
|
||||||
dir: function() {},
|
|
||||||
log: function() {}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.onReady(function() {
|
|
||||||
|
|
||||||
Ext.define('pve-param-schema', {
|
|
||||||
extend: 'Ext.data.Model',
|
|
||||||
fields: [
|
|
||||||
'name', 'type', 'typetext', 'description', 'verbose_description',
|
|
||||||
'enum', 'minimum', 'maximum', 'minLength', 'maxLength',
|
|
||||||
'pattern', 'title', 'requires', 'format', 'default',
|
|
||||||
'disallow', 'extends', 'links',
|
|
||||||
{
|
|
||||||
name: 'optional',
|
|
||||||
type: 'boolean'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var store = Ext.define('pve-updated-treestore', {
|
|
||||||
extend: 'Ext.data.TreeStore',
|
|
||||||
model: Ext.define('pve-api-doc', {
|
|
||||||
extend: 'Ext.data.Model',
|
|
||||||
fields: [
|
|
||||||
'path', 'info', 'text',
|
|
||||||
]
|
|
||||||
}),
|
|
||||||
proxy: {
|
|
||||||
type: 'memory',
|
|
||||||
data: pbsapi
|
|
||||||
},
|
|
||||||
sorters: [{
|
|
||||||
property: 'leaf',
|
|
||||||
direction: 'ASC'
|
|
||||||
}, {
|
|
||||||
property: 'text',
|
|
||||||
direction: 'ASC'
|
|
||||||
}],
|
|
||||||
filterer: 'bottomup',
|
|
||||||
doFilter: function(node) {
|
|
||||||
this.filterNodes(node, this.getFilters().getFilterFn(), true);
|
|
||||||
},
|
|
||||||
|
|
||||||
filterNodes: function(node, filterFn, parentVisible) {
|
|
||||||
var me = this,
|
|
||||||
bottomUpFiltering = me.filterer === 'bottomup',
|
|
||||||
match = filterFn(node) && parentVisible || (node.isRoot() && !me.getRootVisible()),
|
|
||||||
childNodes = node.childNodes,
|
|
||||||
len = childNodes && childNodes.length, i, matchingChildren;
|
|
||||||
|
|
||||||
if (len) {
|
|
||||||
for (i = 0; i < len; ++i) {
|
|
||||||
matchingChildren = me.filterNodes(childNodes[i], filterFn, match || bottomUpFiltering) || matchingChildren;
|
|
||||||
}
|
|
||||||
if (bottomUpFiltering) {
|
|
||||||
match = matchingChildren || match;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
node.set("visible", match, me._silentOptions);
|
|
||||||
return match;
|
|
||||||
},
|
|
||||||
|
|
||||||
}).create();
|
|
||||||
|
|
||||||
var render_description = function(value, metaData, record) {
|
|
||||||
var pdef = record.data;
|
|
||||||
|
|
||||||
value = pdef.verbose_description || value;
|
|
||||||
|
|
||||||
// TODO: try to render asciidoc correctly
|
|
||||||
|
|
||||||
metaData.style = 'white-space:pre-wrap;'
|
|
||||||
|
|
||||||
return Ext.htmlEncode(value);
|
|
||||||
};
|
|
||||||
|
|
||||||
var render_type = function(value, metaData, record) {
|
|
||||||
var pdef = record.data;
|
|
||||||
|
|
||||||
return pdef['enum'] ? 'enum' : (pdef.type || 'string');
|
|
||||||
};
|
|
||||||
|
|
||||||
var render_format = function(value, metaData, record) {
|
|
||||||
var pdef = record.data;
|
|
||||||
|
|
||||||
metaData.style = 'white-space:normal;'
|
|
||||||
|
|
||||||
if (pdef.typetext)
|
|
||||||
return Ext.htmlEncode(pdef.typetext);
|
|
||||||
|
|
||||||
if (pdef['enum'])
|
|
||||||
return pdef['enum'].join(' | ');
|
|
||||||
|
|
||||||
if (pdef.format)
|
|
||||||
return pdef.format;
|
|
||||||
|
|
||||||
if (pdef.pattern)
|
|
||||||
return Ext.htmlEncode(pdef.pattern);
|
|
||||||
|
|
||||||
return '';
|
|
||||||
};
|
|
||||||
|
|
||||||
var real_path = function(path) {
|
|
||||||
return path.replace(/^.*\/_upgrade_(\/)?/, "/");
|
|
||||||
};
|
|
||||||
|
|
||||||
var permission_text = function(permission) {
|
|
||||||
let permhtml = "";
|
|
||||||
|
|
||||||
if (permission.user) {
|
|
||||||
if (!permission.description) {
|
|
||||||
if (permission.user === 'world') {
|
|
||||||
permhtml += "Accessible without any authentication.";
|
|
||||||
} else if (permission.user === 'all') {
|
|
||||||
permhtml += "Accessible by all authenticated users.";
|
|
||||||
} else {
|
|
||||||
permhtml += 'Onyl accessible by user "' +
|
|
||||||
permission.user + '"';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (permission.check) {
|
|
||||||
permhtml += "<pre>Check: " +
|
|
||||||
Ext.htmlEncode(Ext.JSON.encode(permission.check)) + "</pre>";
|
|
||||||
} else if (permission.userParam) {
|
|
||||||
permhtml += `<div>Check if user matches parameter '${permission.userParam}'`;
|
|
||||||
} else if (permission.or) {
|
|
||||||
permhtml += "<div>Or<div style='padding-left: 10px;'>";
|
|
||||||
Ext.Array.each(permission.or, function(sub_permission) {
|
|
||||||
permhtml += permission_text(sub_permission);
|
|
||||||
})
|
|
||||||
permhtml += "</div></div>";
|
|
||||||
} else if (permission.and) {
|
|
||||||
permhtml += "<div>And<div style='padding-left: 10px;'>";
|
|
||||||
Ext.Array.each(permission.and, function(sub_permission) {
|
|
||||||
permhtml += permission_text(sub_permission);
|
|
||||||
})
|
|
||||||
permhtml += "</div></div>";
|
|
||||||
} else {
|
|
||||||
//console.log(permission);
|
|
||||||
permhtml += "Unknown systax!";
|
|
||||||
}
|
|
||||||
|
|
||||||
return permhtml;
|
|
||||||
};
|
|
||||||
|
|
||||||
var render_docu = function(data) {
|
|
||||||
var md = data.info;
|
|
||||||
|
|
||||||
// console.dir(data);
|
|
||||||
|
|
||||||
var items = [];
|
|
||||||
|
|
||||||
var clicmdhash = {
|
|
||||||
GET: 'get',
|
|
||||||
POST: 'create',
|
|
||||||
PUT: 'set',
|
|
||||||
DELETE: 'delete'
|
|
||||||
};
|
|
||||||
|
|
||||||
Ext.Array.each(['GET', 'POST', 'PUT', 'DELETE'], function(method) {
|
|
||||||
var info = md[method];
|
|
||||||
if (info) {
|
|
||||||
|
|
||||||
var usage = "";
|
|
||||||
|
|
||||||
usage += "<table><tr><td>HTTP: </td><td>"
|
|
||||||
+ method + " " + real_path("/api2/json" + data.path) + "</td></tr>";
|
|
||||||
|
|
||||||
var sections = [
|
|
||||||
{
|
|
||||||
title: 'Description',
|
|
||||||
html: Ext.htmlEncode(info.description),
|
|
||||||
bodyPadding: 10
|
|
||||||
},
|
|
||||||
{
|
|
||||||
title: 'Usage',
|
|
||||||
html: usage,
|
|
||||||
bodyPadding: 10
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
if (info.parameters && info.parameters.properties) {
|
|
||||||
|
|
||||||
var pstore = Ext.create('Ext.data.Store', {
|
|
||||||
model: 'pve-param-schema',
|
|
||||||
proxy: {
|
|
||||||
type: 'memory'
|
|
||||||
},
|
|
||||||
groupField: 'optional',
|
|
||||||
sorters: [
|
|
||||||
{
|
|
||||||
property: 'name',
|
|
||||||
direction: 'ASC'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
Ext.Object.each(info.parameters.properties, function(name, pdef) {
|
|
||||||
pdef.name = name;
|
|
||||||
pstore.add(pdef);
|
|
||||||
});
|
|
||||||
|
|
||||||
pstore.sort();
|
|
||||||
|
|
||||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
|
||||||
enableGroupingMenu: false,
|
|
||||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Required</tpl>'
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
xtype: 'gridpanel',
|
|
||||||
title: 'Parameters',
|
|
||||||
features: [groupingFeature],
|
|
||||||
store: pstore,
|
|
||||||
viewConfig: {
|
|
||||||
trackOver: false,
|
|
||||||
stripeRows: true
|
|
||||||
},
|
|
||||||
columns: [
|
|
||||||
{
|
|
||||||
header: 'Name',
|
|
||||||
dataIndex: 'name',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Type',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_type,
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Default',
|
|
||||||
dataIndex: 'default',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Format',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_format,
|
|
||||||
flex: 2
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Description',
|
|
||||||
dataIndex: 'description',
|
|
||||||
renderer: render_description,
|
|
||||||
flex: 6
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if (info.returns) {
|
|
||||||
|
|
||||||
var retinf = info.returns;
|
|
||||||
var rtype = retinf.type;
|
|
||||||
if (!rtype && retinf.items)
|
|
||||||
rtype = 'array';
|
|
||||||
if (!rtype)
|
|
||||||
rtype = 'object';
|
|
||||||
|
|
||||||
var rpstore = Ext.create('Ext.data.Store', {
|
|
||||||
model: 'pve-param-schema',
|
|
||||||
proxy: {
|
|
||||||
type: 'memory'
|
|
||||||
},
|
|
||||||
groupField: 'optional',
|
|
||||||
sorters: [
|
|
||||||
{
|
|
||||||
property: 'name',
|
|
||||||
direction: 'ASC'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var properties;
|
|
||||||
if (rtype === 'array' && retinf.items.properties) {
|
|
||||||
properties = retinf.items.properties;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (rtype === 'object' && retinf.properties) {
|
|
||||||
properties = retinf.properties;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.Object.each(properties, function(name, pdef) {
|
|
||||||
pdef.name = name;
|
|
||||||
rpstore.add(pdef);
|
|
||||||
});
|
|
||||||
|
|
||||||
rpstore.sort();
|
|
||||||
|
|
||||||
var groupingFeature = Ext.create('Ext.grid.feature.Grouping',{
|
|
||||||
enableGroupingMenu: false,
|
|
||||||
groupHeaderTpl: '<tpl if="groupValue">Optional</tpl><tpl if="!groupValue">Obligatory</tpl>'
|
|
||||||
});
|
|
||||||
var returnhtml;
|
|
||||||
if (retinf.items) {
|
|
||||||
returnhtml = '<pre>items: ' + Ext.htmlEncode(JSON.stringify(retinf.items, null, 4)) + '</pre>';
|
|
||||||
}
|
|
||||||
|
|
||||||
if (retinf.properties) {
|
|
||||||
returnhtml = returnhtml || '';
|
|
||||||
returnhtml += '<pre>properties:' + Ext.htmlEncode(JSON.stringify(retinf.properties, null, 4)) + '</pre>';
|
|
||||||
}
|
|
||||||
|
|
||||||
var rawSection = Ext.create('Ext.panel.Panel', {
|
|
||||||
bodyPadding: '0px 10px 10px 10px',
|
|
||||||
html: returnhtml,
|
|
||||||
hidden: true
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
xtype: 'gridpanel',
|
|
||||||
title: 'Returns: ' + rtype,
|
|
||||||
features: [groupingFeature],
|
|
||||||
store: rpstore,
|
|
||||||
viewConfig: {
|
|
||||||
trackOver: false,
|
|
||||||
stripeRows: true
|
|
||||||
},
|
|
||||||
columns: [
|
|
||||||
{
|
|
||||||
header: 'Name',
|
|
||||||
dataIndex: 'name',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Type',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_type,
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Default',
|
|
||||||
dataIndex: 'default',
|
|
||||||
flex: 1
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Format',
|
|
||||||
dataIndex: 'type',
|
|
||||||
renderer: render_format,
|
|
||||||
flex: 2
|
|
||||||
},
|
|
||||||
{
|
|
||||||
header: 'Description',
|
|
||||||
dataIndex: 'description',
|
|
||||||
renderer: render_description,
|
|
||||||
flex: 6
|
|
||||||
}
|
|
||||||
],
|
|
||||||
bbar: [
|
|
||||||
{
|
|
||||||
xtype: 'button',
|
|
||||||
text: 'Show RAW',
|
|
||||||
handler: function(btn) {
|
|
||||||
rawSection.setVisible(!rawSection.isVisible());
|
|
||||||
btn.setText(rawSection.isVisible() ? 'Hide RAW' : 'Show RAW');
|
|
||||||
}}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
sections.push(rawSection);
|
|
||||||
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!data.path.match(/\/_upgrade_/)) {
|
|
||||||
var permhtml = '';
|
|
||||||
|
|
||||||
if (!info.permissions) {
|
|
||||||
permhtml = "Root only.";
|
|
||||||
} else {
|
|
||||||
if (info.permissions.description) {
|
|
||||||
permhtml += "<div style='white-space:pre-wrap;padding-bottom:10px;'>" +
|
|
||||||
Ext.htmlEncode(info.permissions.description) + "</div>";
|
|
||||||
}
|
|
||||||
permhtml += permission_text(info.permissions);
|
|
||||||
}
|
|
||||||
|
|
||||||
// we do not have this information for PBS api
|
|
||||||
//if (!info.allowtoken) {
|
|
||||||
// permhtml += "<br />This API endpoint is not available for API tokens."
|
|
||||||
//}
|
|
||||||
|
|
||||||
sections.push({
|
|
||||||
title: 'Required permissions',
|
|
||||||
bodyPadding: 10,
|
|
||||||
html: permhtml
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
items.push({
|
|
||||||
title: method,
|
|
||||||
autoScroll: true,
|
|
||||||
defaults: {
|
|
||||||
border: false
|
|
||||||
},
|
|
||||||
items: sections
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var ct = Ext.getCmp('docview');
|
|
||||||
ct.setTitle("Path: " + real_path(data.path));
|
|
||||||
ct.removeAll(true);
|
|
||||||
ct.add(items);
|
|
||||||
ct.setActiveTab(0);
|
|
||||||
};
|
|
||||||
|
|
||||||
Ext.define('Ext.form.SearchField', {
|
|
||||||
extend: 'Ext.form.field.Text',
|
|
||||||
alias: 'widget.searchfield',
|
|
||||||
|
|
||||||
emptyText: 'Search...',
|
|
||||||
|
|
||||||
flex: 1,
|
|
||||||
|
|
||||||
inputType: 'search',
|
|
||||||
listeners: {
|
|
||||||
'change': function(){
|
|
||||||
|
|
||||||
var value = this.getValue();
|
|
||||||
if (!Ext.isEmpty(value)) {
|
|
||||||
store.filter({
|
|
||||||
property: 'path',
|
|
||||||
value: value,
|
|
||||||
anyMatch: true
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
store.clearFilter();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
var tree = Ext.create('Ext.tree.Panel', {
|
|
||||||
title: 'Resource Tree',
|
|
||||||
tbar: [
|
|
||||||
{
|
|
||||||
xtype: 'searchfield',
|
|
||||||
}
|
|
||||||
],
|
|
||||||
tools: [
|
|
||||||
{
|
|
||||||
type: 'expand',
|
|
||||||
tooltip: 'Expand all',
|
|
||||||
tooltipType: 'title',
|
|
||||||
callback: (tree) => tree.expandAll(),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
type: 'collapse',
|
|
||||||
tooltip: 'Collapse all',
|
|
||||||
tooltipType: 'title',
|
|
||||||
callback: (tree) => tree.collapseAll(),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
store: store,
|
|
||||||
width: 200,
|
|
||||||
region: 'west',
|
|
||||||
split: true,
|
|
||||||
margins: '5 0 5 5',
|
|
||||||
rootVisible: false,
|
|
||||||
listeners: {
|
|
||||||
selectionchange: function(v, selections) {
|
|
||||||
if (!selections[0])
|
|
||||||
return;
|
|
||||||
var rec = selections[0];
|
|
||||||
render_docu(rec.data);
|
|
||||||
location.hash = '#' + rec.data.path;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Ext.create('Ext.container.Viewport', {
|
|
||||||
layout: 'border',
|
|
||||||
renderTo: Ext.getBody(),
|
|
||||||
items: [
|
|
||||||
tree,
|
|
||||||
{
|
|
||||||
xtype: 'tabpanel',
|
|
||||||
title: 'Documentation',
|
|
||||||
id: 'docview',
|
|
||||||
region: 'center',
|
|
||||||
margins: '5 5 5 0',
|
|
||||||
layout: 'fit',
|
|
||||||
items: []
|
|
||||||
}
|
|
||||||
]
|
|
||||||
});
|
|
||||||
|
|
||||||
var deepLink = function() {
|
|
||||||
var path = window.location.hash.substring(1).replace(/\/\s*$/, '')
|
|
||||||
var endpoint = store.findNode('path', path);
|
|
||||||
|
|
||||||
if (endpoint) {
|
|
||||||
tree.getSelectionModel().select(endpoint);
|
|
||||||
tree.expandPath(endpoint.getPath());
|
|
||||||
render_docu(endpoint.data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
window.onhashchange = deepLink;
|
|
||||||
|
|
||||||
deepLink();
|
|
||||||
|
|
||||||
});
|
|
@ -3,9 +3,10 @@ Backup Client Usage
|
|||||||
|
|
||||||
The command line client is called :command:`proxmox-backup-client`.
|
The command line client is called :command:`proxmox-backup-client`.
|
||||||
|
|
||||||
|
.. _client_repository:
|
||||||
|
|
||||||
Repository Locations
|
Backup Repository Locations
|
||||||
--------------------
|
---------------------------
|
||||||
|
|
||||||
The client uses the following notation to specify a datastore repository
|
The client uses the following notation to specify a datastore repository
|
||||||
on the backup server.
|
on the backup server.
|
||||||
@ -48,15 +49,31 @@ Environment Variables
|
|||||||
When set, this value is used for the password required for the backup server.
|
When set, this value is used for the password required for the backup server.
|
||||||
You can also set this to a API token secret.
|
You can also set this to a API token secret.
|
||||||
|
|
||||||
|
``PBS_PASSWORD_FD``, ``PBS_PASSWORD_FILE``, ``PBS_PASSWORD_CMD``
|
||||||
|
Like ``PBS_PASSWORD``, but read data from an open file descriptor, a file
|
||||||
|
name or from the `stdout` of a command, respectively. The first defined
|
||||||
|
environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_ENCRYPTION_PASSWORD``
|
``PBS_ENCRYPTION_PASSWORD``
|
||||||
When set, this value is used to access the secret encryption key (if
|
When set, this value is used to access the secret encryption key (if
|
||||||
protected by password).
|
protected by password).
|
||||||
|
|
||||||
|
``PBS_ENCRYPTION_PASSWORD_FD``, ``PBS_ENCRYPTION_PASSWORD_FILE``, ``PBS_ENCRYPTION_PASSWORD_CMD``
|
||||||
|
Like ``PBS_ENCRYPTION_PASSWORD``, but read data from an open file descriptor,
|
||||||
|
a file name or from the `stdout` of a command, respectively. The first
|
||||||
|
defined environment variable from the order above is preferred.
|
||||||
|
|
||||||
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
``PBS_FINGERPRINT`` When set, this value is used to verify the server
|
||||||
certificate (only used if the system CA certificates cannot validate the
|
certificate (only used if the system CA certificates cannot validate the
|
||||||
certificate).
|
certificate).
|
||||||
|
|
||||||
|
|
||||||
|
.. Note:: Passwords must be valid UTF8 an may not contain
|
||||||
|
newlines. For your convienience, we just use the first line as
|
||||||
|
password, so you can add arbitrary comments after the
|
||||||
|
first newline.
|
||||||
|
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
@ -471,7 +488,7 @@ located in ``/etc``, you could do the following:
|
|||||||
pxar:/ > restore target/ --pattern etc/**/*.conf
|
pxar:/ > restore target/ --pattern etc/**/*.conf
|
||||||
...
|
...
|
||||||
|
|
||||||
The above will scan trough all the directories below ``/etc`` and restore all
|
The above will scan through all the directories below ``/etc`` and restore all
|
||||||
files ending in ``.conf``.
|
files ending in ``.conf``.
|
||||||
|
|
||||||
.. todo:: Explain interactive restore in more detail
|
.. todo:: Explain interactive restore in more detail
|
||||||
@ -691,8 +708,15 @@ Benchmarking
|
|||||||
------------
|
------------
|
||||||
|
|
||||||
The backup client also comes with a benchmarking tool. This tool measures
|
The backup client also comes with a benchmarking tool. This tool measures
|
||||||
various metrics relating to compression and encryption speeds. You can run a
|
various metrics relating to compression and encryption speeds. If a Proxmox
|
||||||
benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
Backup repository (remote or local) is specified, the TLS upload speed will get
|
||||||
|
measured too.
|
||||||
|
|
||||||
|
You can run a benchmark using the ``benchmark`` subcommand of
|
||||||
|
``proxmox-backup-client``:
|
||||||
|
|
||||||
|
.. note:: The TLS speed test is only included if a :ref:`backup server
|
||||||
|
repository is specified <client_repository>`.
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -723,8 +747,7 @@ benchmark using the ``benchmark`` subcommand of ``proxmox-backup-client``:
|
|||||||
|
|
||||||
|
|
||||||
.. note:: The percentages given in the output table correspond to a
|
.. note:: The percentages given in the output table correspond to a
|
||||||
comparison against a Ryzen 7 2700X. The TLS test connects to the
|
comparison against a Ryzen 7 2700X.
|
||||||
local host, so there is no network involved.
|
|
||||||
|
|
||||||
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
You can also pass the ``--output-format`` parameter to output stats in ``json``,
|
||||||
rather than the default table format.
|
rather than the default table format.
|
||||||
|
@ -6,6 +6,11 @@ Command Line Tools
|
|||||||
|
|
||||||
.. include:: proxmox-backup-client/description.rst
|
.. include:: proxmox-backup-client/description.rst
|
||||||
|
|
||||||
|
``proxmox-file-restore``
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-file-restore/description.rst
|
||||||
|
|
||||||
``proxmox-backup-manager``
|
``proxmox-backup-manager``
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
@ -16,3 +21,7 @@ Command Line Tools
|
|||||||
|
|
||||||
.. include:: pxar/description.rst
|
.. include:: pxar/description.rst
|
||||||
|
|
||||||
|
``proxmox-backup-debug``
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
.. include:: proxmox-backup-debug/description.rst
|
||||||
|
@ -26,6 +26,27 @@ Those command are available when you start an interactive restore shell:
|
|||||||
.. include:: proxmox-backup-manager/synopsis.rst
|
.. include:: proxmox-backup-manager/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``proxmox-tape``
|
||||||
|
----------------
|
||||||
|
|
||||||
|
.. include:: proxmox-tape/synopsis.rst
|
||||||
|
|
||||||
|
``pmt``
|
||||||
|
-------
|
||||||
|
|
||||||
|
.. include:: pmt/options.rst
|
||||||
|
|
||||||
|
....
|
||||||
|
|
||||||
|
.. include:: pmt/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
``pmtx``
|
||||||
|
--------
|
||||||
|
|
||||||
|
.. include:: pmtx/synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
``pxar``
|
``pxar``
|
||||||
--------
|
--------
|
||||||
|
|
||||||
|
@ -49,7 +49,7 @@ PygmentsBridge.latex_formatter = CustomLatexFormatter
|
|||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||||
# ones.
|
# ones.
|
||||||
|
|
||||||
extensions = ["sphinx.ext.graphviz", "sphinx.ext.todo", "proxmox-scanrefs"]
|
extensions = ["sphinx.ext.graphviz", 'sphinx.ext.mathjax', "sphinx.ext.todo", "proxmox-scanrefs"]
|
||||||
|
|
||||||
todo_link_only = True
|
todo_link_only = True
|
||||||
|
|
||||||
@ -307,6 +307,9 @@ html_show_sourcelink = False
|
|||||||
# Output file base name for HTML help builder.
|
# Output file base name for HTML help builder.
|
||||||
htmlhelp_basename = 'ProxmoxBackupdoc'
|
htmlhelp_basename = 'ProxmoxBackupdoc'
|
||||||
|
|
||||||
|
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
|
||||||
|
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
|
||||||
|
|
||||||
# -- Options for LaTeX output ---------------------------------------------
|
# -- Options for LaTeX output ---------------------------------------------
|
||||||
|
|
||||||
latex_engine = 'xelatex'
|
latex_engine = 'xelatex'
|
||||||
@ -464,6 +467,3 @@ epub_exclude_files = ['search.html']
|
|||||||
# If false, no index is generated.
|
# If false, no index is generated.
|
||||||
#
|
#
|
||||||
# epub_use_index = True
|
# epub_use_index = True
|
||||||
|
|
||||||
# use local mathjax package, symlink comes from debian/proxmox-backup-docs.links
|
|
||||||
mathjax_path = "mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
Each drive configuration section starts with a header ``linux: <name>``,
|
Each LTO drive configuration section starts with a header ``lto: <name>``,
|
||||||
followed by the drive configuration options.
|
followed by the drive configuration options.
|
||||||
|
|
||||||
Tape changer configurations starts with ``changer: <name>``,
|
Tape changer configurations starts with ``changer: <name>``,
|
||||||
@ -6,7 +6,7 @@ followed by the changer configuration options.
|
|||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
linux: hh8
|
lto: hh8
|
||||||
changer sl3
|
changer sl3
|
||||||
path /dev/tape/by-id/scsi-10WT065325-nst
|
path /dev/tape/by-id/scsi-10WT065325-nst
|
||||||
|
|
||||||
|
@ -37,8 +37,53 @@ Options
|
|||||||
.. include:: config/datastore/config.rst
|
.. include:: config/datastore/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``media-pool.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/media-pool/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/media-pool/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``tape.cfg``
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape/config.rst
|
||||||
|
|
||||||
|
|
||||||
|
``tape-job.cfg``
|
||||||
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
File Format
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape-job/format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Options
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
|
.. include:: config/tape-job/config.rst
|
||||||
|
|
||||||
|
|
||||||
``user.cfg``
|
``user.cfg``
|
||||||
~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
File Format
|
File Format
|
||||||
^^^^^^^^^^^
|
^^^^^^^^^^^
|
||||||
|
@ -57,6 +57,11 @@ div.sphinxsidebar h3 {
|
|||||||
div.sphinxsidebar h1.logo-name {
|
div.sphinxsidebar h1.logo-name {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.document, div.footer {
|
||||||
|
width: min(100%, 1320px);
|
||||||
|
}
|
||||||
|
|
||||||
@media screen and (max-width: 875px) {
|
@media screen and (max-width: 875px) {
|
||||||
div.sphinxsidebar p.logo {
|
div.sphinxsidebar p.logo {
|
||||||
display: initial;
|
display: initial;
|
||||||
@ -65,9 +70,19 @@ div.sphinxsidebar h1.logo-name {
|
|||||||
display: block;
|
display: block;
|
||||||
}
|
}
|
||||||
div.sphinxsidebar span {
|
div.sphinxsidebar span {
|
||||||
color: #AAA;
|
color: #EEE;
|
||||||
}
|
}
|
||||||
ul li.toctree-l1 > a {
|
.sphinxsidebar ul li.toctree-l1 > a, div.sphinxsidebar a {
|
||||||
color: #FFF;
|
color: #FFF;
|
||||||
}
|
}
|
||||||
|
div.sphinxsidebar {
|
||||||
|
background-color: #555;
|
||||||
|
}
|
||||||
|
div.body {
|
||||||
|
min-width: 300px;
|
||||||
|
}
|
||||||
|
div.footer {
|
||||||
|
display: block;
|
||||||
|
margin: 15px auto 0px auto;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
16
docs/faq.rst
@ -24,11 +24,13 @@ future plans to support 32-bit processors.
|
|||||||
How long will my Proxmox Backup Server version be supported?
|
How long will my Proxmox Backup Server version be supported?
|
||||||
------------------------------------------------------------
|
------------------------------------------------------------
|
||||||
|
|
||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
|
||||||
+=======================+====================+===============+============+====================+
|
+=======================+======================+===============+============+====================+
|
||||||
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | tba | tba |
|
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
|
||||||
+-----------------------+--------------------+---------------+------------+--------------------+
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | ~Q2/2022 | Q2-Q3/2022 |
|
||||||
|
+-----------------------+----------------------+---------------+------------+--------------------+
|
||||||
|
|
||||||
|
|
||||||
Can I copy or synchronize my datastore to another location?
|
Can I copy or synchronize my datastore to another location?
|
||||||
@ -61,9 +63,7 @@ attacker gains access to the server or any point of the network, they will not
|
|||||||
be able to read the data.
|
be able to read the data.
|
||||||
|
|
||||||
.. note:: Encryption is not enabled by default. To set up encryption, see the
|
.. note:: Encryption is not enabled by default. To set up encryption, see the
|
||||||
`Encryption
|
:ref:`backup client encryption section <client_encryption>`.
|
||||||
<https://pbs.proxmox.com/docs/administration-guide.html#encryption>`_ section
|
|
||||||
of the Proxmox Backup Server Administration Guide.
|
|
||||||
|
|
||||||
|
|
||||||
Is the backup incremental/deduplicated?
|
Is the backup incremental/deduplicated?
|
||||||
|
@ -51,7 +51,7 @@ data:
|
|||||||
|
|
||||||
* - ``MAGIC: [u8; 8]``
|
* - ``MAGIC: [u8; 8]``
|
||||||
* - ``CRC32: [u8; 4]``
|
* - ``CRC32: [u8; 4]``
|
||||||
* - ``ÌV: [u8; 16]``
|
* - ``IV: [u8; 16]``
|
||||||
* - ``TAG: [u8; 16]``
|
* - ``TAG: [u8; 16]``
|
||||||
* - ``Data: (max 16MiB)``
|
* - ``Data: (max 16MiB)``
|
||||||
|
|
||||||
|
12
docs/gui.rst
@ -112,6 +112,18 @@ The administration menu item also contains a disk management subsection:
|
|||||||
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
* **Directory**: Create and view information on *ext4* and *xfs* disks
|
||||||
* **ZFS**: Create and view information on *ZFS* disks
|
* **ZFS**: Create and view information on *ZFS* disks
|
||||||
|
|
||||||
|
Tape Backup
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Tape changer overview
|
||||||
|
|
||||||
|
The `Tape Backup`_ section contains a top panel, managing tape media sets,
|
||||||
|
inventories, drives, changers and the tape backup jobs itself.
|
||||||
|
|
||||||
|
It also contains a subsection per standalone drive and per changer, with a
|
||||||
|
status and management view for those devices.
|
||||||
|
|
||||||
Datastore
|
Datastore
|
||||||
^^^^^^^^^
|
^^^^^^^^^
|
||||||
|
BIN
docs/images/screenshots/pbs-gui-tape-backup-jobs-add.png
Normal file
After Width: | Height: | Size: 28 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-backup-jobs.png
Normal file
After Width: | Height: | Size: 75 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changer-overview.png
Normal file
After Width: | Height: | Size: 117 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changers-add.png
Normal file
After Width: | Height: | Size: 12 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-changers.png
Normal file
After Width: | Height: | Size: 79 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-crypt-keys.png
Normal file
After Width: | Height: | Size: 72 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-drives-add.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-drives.png
Normal file
After Width: | Height: | Size: 112 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-pools-add.png
Normal file
After Width: | Height: | Size: 18 KiB |
BIN
docs/images/screenshots/pbs-gui-tape-pools.png
Normal file
After Width: | Height: | Size: 70 KiB |
@ -25,14 +25,15 @@ in the section entitled "GNU Free Documentation License".
|
|||||||
terminology.rst
|
terminology.rst
|
||||||
gui.rst
|
gui.rst
|
||||||
storage.rst
|
storage.rst
|
||||||
network-management.rst
|
|
||||||
user-management.rst
|
user-management.rst
|
||||||
managing-remotes.rst
|
|
||||||
maintenance.rst
|
|
||||||
backup-client.rst
|
backup-client.rst
|
||||||
pve-integration.rst
|
pve-integration.rst
|
||||||
pxar-tool.rst
|
pxar-tool.rst
|
||||||
|
tape-backup.rst
|
||||||
|
managing-remotes.rst
|
||||||
|
maintenance.rst
|
||||||
sysadmin.rst
|
sysadmin.rst
|
||||||
|
network-management.rst
|
||||||
technical-overview.rst
|
technical-overview.rst
|
||||||
faq.rst
|
faq.rst
|
||||||
|
|
||||||
|
@ -19,7 +19,7 @@ for various management tasks such as disk management.
|
|||||||
`Proxmox Backup`_ without the server part.
|
`Proxmox Backup`_ without the server part.
|
||||||
|
|
||||||
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
The disk image (ISO file) provided by Proxmox includes a complete Debian system
|
||||||
("buster" for version 1.x) as well as all necessary packages for the `Proxmox Backup`_ server.
|
as well as all necessary packages for the `Proxmox Backup`_ server.
|
||||||
|
|
||||||
The installer will guide you through the setup process and allow
|
The installer will guide you through the setup process and allow
|
||||||
you to partition the local disk(s), apply basic system configurations
|
you to partition the local disk(s), apply basic system configurations
|
||||||
@ -113,9 +113,9 @@ Client Installation
|
|||||||
Install `Proxmox Backup`_ Client on Debian
|
Install `Proxmox Backup`_ Client on Debian
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Proxmox ships as a set of Debian packages to be installed on
|
Proxmox ships as a set of Debian packages to be installed on top of a standard
|
||||||
top of a standard Debian installation. After configuring the
|
Debian installation. After configuring the :ref:`package_repositories_client_only_apt`,
|
||||||
:ref:`sysadmin_package_repositories`, you need to run:
|
you need to run:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -123,12 +123,6 @@ top of a standard Debian installation. After configuring the
|
|||||||
# apt-get install proxmox-backup-client
|
# apt-get install proxmox-backup-client
|
||||||
|
|
||||||
|
|
||||||
Installing from source
|
.. note:: The client-only repository should be usable by most recent Debian and
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
Ubuntu derivatives.
|
||||||
|
|
||||||
.. todo:: Add section "Installing from source"
|
|
||||||
|
|
||||||
Installing statically linked binary
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. todo:: Add section "Installing statically linked binary"
|
|
||||||
|
@ -76,7 +76,7 @@ Main Features
|
|||||||
:Open Source: No secrets. Proxmox Backup Server is free and open-source
|
:Open Source: No secrets. Proxmox Backup Server is free and open-source
|
||||||
software. The source code is licensed under AGPL, v3.
|
software. The source code is licensed under AGPL, v3.
|
||||||
|
|
||||||
:No Limits: Proxmox Backup Server has no artifical limits for backup storage or
|
:No Limits: Proxmox Backup Server has no artificial limits for backup storage or
|
||||||
backup-clients.
|
backup-clients.
|
||||||
|
|
||||||
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
:Enterprise Support: Proxmox Server Solutions GmbH offers enterprise support in
|
||||||
@ -149,8 +149,8 @@ Enterprise Support
|
|||||||
|
|
||||||
Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
|
Users with a `Proxmox Backup Server Basic, Standard or Premium Subscription Plan
|
||||||
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
|
<https://www.proxmox.com/en/proxmox-backup-server/pricing>`_ have access to the
|
||||||
Proxmox Customer Portal. The Customer Portal provides support with guaranteed
|
`Proxmox Customer Portal <https://my.proxmox.com>`_. The customer portal
|
||||||
response times from the Proxmox developers.
|
provides support with guaranteed response times from the Proxmox developers.
|
||||||
For more information or for volume discounts, please contact office@proxmox.com.
|
For more information or for volume discounts, please contact office@proxmox.com.
|
||||||
|
|
||||||
Community Support Forum
|
Community Support Forum
|
||||||
|
@ -34,17 +34,7 @@
|
|||||||
</style>
|
</style>
|
||||||
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
|
<link rel="stylesheet" type="text/css" href="font-awesome/css/font-awesome.css"/>
|
||||||
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
<script type="text/javascript" src="extjs/ext-all.js"></script>
|
||||||
|
<script type="text/javascript" src="lto-barcode-generator.js"></script>
|
||||||
<script type="text/javascript" src="code39.js"></script>
|
|
||||||
<script type="text/javascript" src="prefix-field.js"></script>
|
|
||||||
<script type="text/javascript" src="label-style.js"></script>
|
|
||||||
<script type="text/javascript" src="tape-type.js"></script>
|
|
||||||
<script type="text/javascript" src="paper-size.js"></script>
|
|
||||||
<script type="text/javascript" src="page-layout.js"></script>
|
|
||||||
<script type="text/javascript" src="page-calibration.js"></script>
|
|
||||||
<script type="text/javascript" src="label-list.js"></script>
|
|
||||||
<script type="text/javascript" src="label-setup.js"></script>
|
|
||||||
<script type="text/javascript" src="lto-barcode.js"></script>
|
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
</body>
|
</body>
|
||||||
|
@ -1,7 +1,5 @@
|
|||||||
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
// for toolkit.js
|
||||||
if (Ext.isFirefox) {
|
function gettext(val) { return val; };
|
||||||
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
|
||||||
}
|
|
||||||
|
|
||||||
function draw_labels(target_id, label_list, page_layout, calibration) {
|
function draw_labels(target_id, label_list, page_layout, calibration) {
|
||||||
let max_labels = compute_max_labels(page_layout);
|
let max_labels = compute_max_labels(page_layout);
|
||||||
|
@ -148,7 +148,7 @@ are checked again. The interface for creating verify jobs can be found under the
|
|||||||
**Verify Jobs** tab of the datastore.
|
**Verify Jobs** tab of the datastore.
|
||||||
|
|
||||||
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
.. Note:: It is recommended that you reverify all backups at least monthly, even
|
||||||
if a previous verification was successful. This is becuase physical drives
|
if a previous verification was successful. This is because physical drives
|
||||||
are susceptible to damage over time, which can cause an old, working backup
|
are susceptible to damage over time, which can cause an old, working backup
|
||||||
to become corrupted in a process known as `bit rot/data degradation
|
to become corrupted in a process known as `bit rot/data degradation
|
||||||
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
|
<https://en.wikipedia.org/wiki/Data_degradation>`_. It is good practice to
|
||||||
|
@ -17,18 +17,18 @@ update``.
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list``
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
deb http://ftp.debian.org/debian buster main contrib
|
deb http://ftp.debian.org/debian bullseye main contrib
|
||||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
deb http://ftp.debian.org/debian bullseye-updates main contrib
|
||||||
|
|
||||||
# security updates
|
# security updates
|
||||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
deb http://security.debian.org/debian-security bullseye-security main contrib
|
||||||
|
|
||||||
|
|
||||||
.. FIXME for 7.0: change security update suite to bullseye-security
|
|
||||||
|
|
||||||
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
||||||
updates.
|
updates.
|
||||||
|
|
||||||
|
.. _package_repos_secure_apt:
|
||||||
|
|
||||||
SecureApt
|
SecureApt
|
||||||
~~~~~~~~~
|
~~~~~~~~~
|
||||||
|
|
||||||
@ -43,31 +43,21 @@ key with the following commands:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
|
||||||
Verify the SHA512 checksum afterwards with:
|
Verify the SHA512 checksum afterwards with the expected output below:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# sha512sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
7fb03ec8a1675723d2853b84aa4fdb49a46a3bb72b9951361488bfd19b29aab0a789a4f8c7406e71a69aabbc727c936d3549731c4659ffa1a08f44db8fdcebfa /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
|
||||||
The output should be:
|
and the md5sum, with the expected output below:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
acca6f416917e8e11490a08a1e2842d500b3a5d9f322c6319db0927b2901c3eae23cfb5cd5df6facf2b57399d3cfa52ad7769ebdd75d9b204549ca147da52626 /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
|
bcc35c7173e0845c0d6ad6470b70f50e /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||||
and the md5sum:
|
|
||||||
|
|
||||||
.. code-block:: console
|
|
||||||
|
|
||||||
# md5sum /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
|
||||||
|
|
||||||
Here, the output should be:
|
|
||||||
|
|
||||||
.. code-block:: console
|
|
||||||
|
|
||||||
f3f6c5a3a67baf38ad178e5ff1ee270c /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
|
||||||
|
|
||||||
.. _sysadmin_package_repos_enterprise:
|
.. _sysadmin_package_repos_enterprise:
|
||||||
|
|
||||||
@ -82,7 +72,7 @@ enabled by default:
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||||
|
|
||||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
|
||||||
|
|
||||||
|
|
||||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||||
@ -112,15 +102,15 @@ We recommend to configure this repository in ``/etc/apt/sources.list``.
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: File: ``/etc/apt/sources.list``
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
deb http://ftp.debian.org/debian buster main contrib
|
deb http://ftp.debian.org/debian bullseye main contrib
|
||||||
deb http://ftp.debian.org/debian buster-updates main contrib
|
deb http://ftp.debian.org/debian bullseye-updates main contrib
|
||||||
|
|
||||||
# PBS pbs-no-subscription repository provided by proxmox.com,
|
# PBS pbs-no-subscription repository provided by proxmox.com,
|
||||||
# NOT recommended for production use
|
# NOT recommended for production use
|
||||||
deb http://download.proxmox.com/debian/pbs buster pbs-no-subscription
|
deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription
|
||||||
|
|
||||||
# security updates
|
# security updates
|
||||||
deb http://security.debian.org/debian-security buster/updates main contrib
|
deb http://security.debian.org/debian-security bullseye-security main contrib
|
||||||
|
|
||||||
|
|
||||||
`Proxmox Backup`_ Test Repository
|
`Proxmox Backup`_ Test Repository
|
||||||
@ -138,4 +128,74 @@ You can access this repository by adding the following line to
|
|||||||
.. code-block:: sources.list
|
.. code-block:: sources.list
|
||||||
:caption: sources.list entry for ``pbstest``
|
:caption: sources.list entry for ``pbstest``
|
||||||
|
|
||||||
deb http://download.proxmox.com/debian/pbs buster pbstest
|
deb http://download.proxmox.com/debian/pbs bullseye pbstest
|
||||||
|
|
||||||
|
.. _package_repositories_client_only:
|
||||||
|
|
||||||
|
Proxmox Backup Client-only Repository
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
If you want to :ref:`use the the Proxmox Backup Client <client_creating_backups>`
|
||||||
|
on systems using a Linux distribution not based on Proxmox projects, you can
|
||||||
|
use the client-only repository.
|
||||||
|
|
||||||
|
Currently there's only a client-repository for APT based systems.
|
||||||
|
|
||||||
|
.. _package_repositories_client_only_apt:
|
||||||
|
|
||||||
|
APT-based Proxmox Backup Client Repository
|
||||||
|
++++++++++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
For modern Linux distributions using `apt` as package manager, like all Debian
|
||||||
|
and Ubuntu Derivative do, you may be able to use the APT-based repository.
|
||||||
|
|
||||||
|
In order to configure this repository you need to first :ref:`setup the Proxmox
|
||||||
|
release key <package_repos_secure_apt>`. After that, add the repository URL to
|
||||||
|
the APT sources lists.
|
||||||
|
|
||||||
|
**Repositories for Debian 11 (Bullseye) based releases**
|
||||||
|
|
||||||
|
This repository is tested with:
|
||||||
|
|
||||||
|
- Debian Bullseye
|
||||||
|
|
||||||
|
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
|
||||||
|
snipped
|
||||||
|
|
||||||
|
.. code-block:: sources.list
|
||||||
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
|
deb http://download.proxmox.com/debian/pbs-client bullseye main
|
||||||
|
|
||||||
|
**Repositories for Debian 10 (Buster) based releases**
|
||||||
|
|
||||||
|
This repository is tested with:
|
||||||
|
|
||||||
|
- Debian Buster
|
||||||
|
- Ubuntu 20.04 LTS
|
||||||
|
|
||||||
|
It may work with older, and should work with more recent released versions.
|
||||||
|
|
||||||
|
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
|
||||||
|
snipped
|
||||||
|
|
||||||
|
.. code-block:: sources.list
|
||||||
|
:caption: File: ``/etc/apt/sources.list``
|
||||||
|
|
||||||
|
deb http://download.proxmox.com/debian/pbs-client buster main
|
||||||
|
|
||||||
|
.. _node_options_http_proxy:
|
||||||
|
|
||||||
|
Repository Access Behind HTTP Proxy
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Some setups have restricted access to the internet, sometimes only through a
|
||||||
|
central proxy. You can setup a HTTP proxy through the Proxmox Backup Server's
|
||||||
|
web-interface in the `Configuration -> Authentication` tab.
|
||||||
|
|
||||||
|
Once configured this proxy will be used for apt network requests and for
|
||||||
|
checking a Proxmox Backup Server support subscription.
|
||||||
|
|
||||||
|
Standard HTTP proxy configurations are accepted, `[http://]<host>[:port]` where
|
||||||
|
the `<host>` part may include an authorization, for example:
|
||||||
|
`http://user:pass@proxy.example.org:12345`
|
||||||
|
@ -13,39 +13,3 @@ parameter. It accepts the following values:
|
|||||||
:``json``: JSON (single line).
|
:``json``: JSON (single line).
|
||||||
|
|
||||||
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
:``json-pretty``: JSON (multiple lines, nicely formatted).
|
||||||
|
|
||||||
|
|
||||||
Device driver options can be specified as integer numbers (see
|
|
||||||
``/usr/include/linux/mtio.h``), or using symbolic names:
|
|
||||||
|
|
||||||
:``buffer-writes``: Enable buffered writes
|
|
||||||
|
|
||||||
:``async-writes``: Enable async writes
|
|
||||||
|
|
||||||
:``read-ahead``: Use read-ahead for fixed block size
|
|
||||||
|
|
||||||
:``debugging``: Enable debugging if compiled into the driver
|
|
||||||
|
|
||||||
:``two-fm``: Write two file marks when closing the file
|
|
||||||
|
|
||||||
:``fast-mteom``: Space directly to eod (and lose file number)
|
|
||||||
|
|
||||||
:``auto-lock``: Automatically lock/unlock drive door
|
|
||||||
|
|
||||||
:``def-writes``: Defaults are meant only for writes
|
|
||||||
|
|
||||||
:``can-bsr``: Indicates that the drive can space backwards
|
|
||||||
|
|
||||||
:``no-blklims``: Drive does not support read block limits
|
|
||||||
|
|
||||||
:``can-partitions``: Drive can handle partitioned tapes
|
|
||||||
|
|
||||||
:``scsi2locical``: Seek and tell use SCSI-2 logical block addresses
|
|
||||||
|
|
||||||
:``sysv``: Enable the System V semantics
|
|
||||||
|
|
||||||
:``nowait``: Do not wait for rewind, etc. to complete
|
|
||||||
|
|
||||||
:``sili``: Enables setting the SILI bit in SCSI commands when reading
|
|
||||||
in variable block mode to enhance performance when reading blocks
|
|
||||||
shorter than the byte count
|
|
||||||
|
14
docs/proxmox-backup-debug/description.rst
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
Implements debugging functionality to inspect Proxmox Backup datastore
|
||||||
|
files, verify the integrity of chunks.
|
||||||
|
|
||||||
|
Also contains an 'api' subcommand where arbitrary api paths can be called
|
||||||
|
(get/create/set/delete) as well as display their parameters (usage) and
|
||||||
|
their child-links (ls).
|
||||||
|
|
||||||
|
By default, it connects to the proxmox-backup-proxy on localhost via https,
|
||||||
|
but by setting the environment variable `PROXMOX_DEBUG_API_CODE` to `1` the
|
||||||
|
tool directly calls the corresponding code.
|
||||||
|
|
||||||
|
.. WARNING:: Using `PROXMOX_DEBUG_API_CODE` can be dangerous and is only intended
|
||||||
|
for debugging purposes. It is not intended for use on a production system.
|
||||||
|
|
33
docs/proxmox-backup-debug/man1.rst
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
==========================
|
||||||
|
proxmox-backup-debug
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-------------------------------------------------------------
|
||||||
|
Debugging command line tool for Backup and Restore
|
||||||
|
-------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
==========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
Common Options
|
||||||
|
==============
|
||||||
|
|
||||||
|
.. include:: ../output-format.rst
|
||||||
|
|
||||||
|
|
||||||
|
Description
|
||||||
|
============
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
3
docs/proxmox-file-restore/description.rst
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
Command line tool for restoring files and directories from PBS archives. In contrast to
|
||||||
|
proxmox-backup-client, this supports both container/host and VM backups.
|
||||||
|
|
28
docs/proxmox-file-restore/man1.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
==========================
|
||||||
|
proxmox-file-restore
|
||||||
|
==========================
|
||||||
|
|
||||||
|
.. include:: ../epilog.rst
|
||||||
|
|
||||||
|
-----------------------------------------------------------------------
|
||||||
|
Command line tool for restoring files and directories from PBS archives
|
||||||
|
-----------------------------------------------------------------------
|
||||||
|
|
||||||
|
:Author: |AUTHOR|
|
||||||
|
:Version: Version |VERSION|
|
||||||
|
:Manual section: 1
|
||||||
|
|
||||||
|
|
||||||
|
Synopsis
|
||||||
|
==========
|
||||||
|
|
||||||
|
.. include:: synopsis.rst
|
||||||
|
|
||||||
|
|
||||||
|
Description
|
||||||
|
============
|
||||||
|
|
||||||
|
.. include:: description.rst
|
||||||
|
|
||||||
|
|
||||||
|
.. include:: ../pbs-copyright.rst
|
@ -1,7 +1,5 @@
|
|||||||
// FIXME: HACK! Makes scrolling in number spinner work again. fixed in ExtJS >= 6.1
|
// for Toolkit.js
|
||||||
if (Ext.isFirefox) {
|
function gettext(val) { return val; };
|
||||||
Ext.$eventNameMap.DOMMouseScroll = 'DOMMouseScroll';
|
|
||||||
}
|
|
||||||
|
|
||||||
Ext.onReady(function() {
|
Ext.onReady(function() {
|
||||||
const NOW = new Date();
|
const NOW = new Date();
|
||||||
@ -37,7 +35,6 @@ Ext.onReady(function() {
|
|||||||
|
|
||||||
editable: true,
|
editable: true,
|
||||||
|
|
||||||
displayField: 'text',
|
|
||||||
valueField: 'value',
|
valueField: 'value',
|
||||||
queryMode: 'local',
|
queryMode: 'local',
|
||||||
|
|
@ -3,6 +3,26 @@
|
|||||||
`Proxmox VE`_ Integration
|
`Proxmox VE`_ Integration
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
|
A Proxmox Backup Server can be integrated into a Proxmox VE setup by adding the
|
||||||
|
former as a storage in a Proxmox VE standalone or cluster setup.
|
||||||
|
|
||||||
|
See also the `Proxmox VE Storage - Proxmox Backup Server
|
||||||
|
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section
|
||||||
|
of the Proxmox VE Administration Guide for Proxmox VE specific documentation.
|
||||||
|
|
||||||
|
|
||||||
|
Using the Proxmox VE Web-Interface
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Proxmox VE has native API and web-interface integration of Proxmox Backup
|
||||||
|
Server since the `Proxmox VE 6.3 release
|
||||||
|
<https://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_6.3>`_.
|
||||||
|
|
||||||
|
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
|
||||||
|
|
||||||
|
Using the Proxmox VE Command-Line
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
You need to define a new storage with type 'pbs' on your `Proxmox VE`_
|
||||||
node. The following example uses ``store2`` as storage name, and
|
node. The following example uses ``store2`` as storage name, and
|
||||||
assumes the server address is ``localhost``, and you want to connect
|
assumes the server address is ``localhost``, and you want to connect
|
||||||
@ -41,9 +61,9 @@ After that you should be able to see storage status with:
|
|||||||
Name Type Status Total Used Available %
|
Name Type Status Total Used Available %
|
||||||
store2 pbs active 3905109820 1336687816 2568422004 34.23%
|
store2 pbs active 3905109820 1336687816 2568422004 34.23%
|
||||||
|
|
||||||
Having added the PBS datastore to `Proxmox VE`_, you can backup VMs and
|
Having added the Proxmox Backup Server datastore to `Proxmox VE`_, you can
|
||||||
containers in the same way you would for any other storage device within the
|
backup VMs and containers in the same way you would for any other storage
|
||||||
environment (see `PVE Admin Guide: Backup and Restore
|
device within the environment (see `Proxmox VE Admin Guide: Backup and Restore
|
||||||
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_.
|
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump>`_.
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
Storage
|
Backup Storage
|
||||||
=======
|
==============
|
||||||
|
|
||||||
.. _storage_disk_management:
|
.. _storage_disk_management:
|
||||||
|
|
||||||
|
@ -3,13 +3,9 @@
|
|||||||
Tape Backup
|
Tape Backup
|
||||||
===========
|
===========
|
||||||
|
|
||||||
.. CAUTION:: Tape Backup is a technical preview feature, not meant for
|
.. image:: images/screenshots/pbs-gui-tape-changer-overview.png
|
||||||
production use. To enable it in the GUI, you need to issue the
|
:align: right
|
||||||
following command (as root user on the console):
|
:alt: Tape Backup: Tape changer overview
|
||||||
|
|
||||||
.. code-block:: console
|
|
||||||
|
|
||||||
# touch /etc/proxmox-backup/tape.cfg
|
|
||||||
|
|
||||||
Proxmox tape backup provides an easy way to store datastore content
|
Proxmox tape backup provides an easy way to store datastore content
|
||||||
onto magnetic tapes. This increases data safety because you get:
|
onto magnetic tapes. This increases data safety because you get:
|
||||||
@ -59,7 +55,7 @@ In general, LTO tapes offer the following advantages:
|
|||||||
- Cold Media
|
- Cold Media
|
||||||
- Movable (storable inside vault)
|
- Movable (storable inside vault)
|
||||||
- Multiple vendors (for both media and drives)
|
- Multiple vendors (for both media and drives)
|
||||||
- Build in AES-GCM Encryption engine
|
- Built in AES-GCM Encryption engine
|
||||||
|
|
||||||
Note that `Proxmox Backup Server` already stores compressed data, so using the
|
Note that `Proxmox Backup Server` already stores compressed data, so using the
|
||||||
tape compression feature has no advantage.
|
tape compression feature has no advantage.
|
||||||
@ -68,13 +64,19 @@ tape compression feature has no advantage.
|
|||||||
Supported Hardware
|
Supported Hardware
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Proxmox Backup Server supports `Linear Tape-Open`_ generation 4 (LTO-4)
|
Proxmox Backup Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
|
||||||
or later. In general, all SCSI-2 tape drives supported by the Linux
|
or later and has best-effort support for generation 4 (LTO-4). While
|
||||||
kernel should work, but features like hardware encryption need LTO-4
|
many LTO-4 systems are known to work, some might need firmware updates or
|
||||||
or later.
|
do not implement necessary features to work with Proxmox Backup Server.
|
||||||
|
|
||||||
Tape changing is carried out using the Linux 'mtx' command line
|
Tape changing is carried out using the SCSI Medium Changer protocol,
|
||||||
tool, so any changer device supported by this tool should work.
|
so all modern tape libraries should work.
|
||||||
|
|
||||||
|
.. Note:: We use a custom user space tape driver written in Rust_. This
|
||||||
|
driver directly communicates with the tape drive using the SCSI
|
||||||
|
generic interface. This may have negative side effects when used with the old
|
||||||
|
Linux kernel tape driver, so you should not use that driver with
|
||||||
|
Proxmox tape backup.
|
||||||
|
|
||||||
|
|
||||||
Drive Performance
|
Drive Performance
|
||||||
@ -84,7 +86,7 @@ Current LTO-8 tapes provide read/write speeds of up to 360 MB/s. This means,
|
|||||||
that it still takes a minimum of 9 hours to completely write or
|
that it still takes a minimum of 9 hours to completely write or
|
||||||
read a single tape (even at maximum speed).
|
read a single tape (even at maximum speed).
|
||||||
|
|
||||||
The only way to speed up that data rate is to use more than one
|
The only way to speed that data rate up is to use more than one
|
||||||
drive. That way, you can run several backup jobs in parallel, or run
|
drive. That way, you can run several backup jobs in parallel, or run
|
||||||
restore jobs while the other dives are used for backups.
|
restore jobs while the other dives are used for backups.
|
||||||
|
|
||||||
@ -93,15 +95,16 @@ Also consider that you first need to read data from your datastore
|
|||||||
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
rate. We measured a maximum rate of about 60MB/s to 100MB/s in practice,
|
||||||
so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
|
so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you want
|
||||||
to write to your tape at full speed, please make sure that the source
|
to write to your tape at full speed, please make sure that the source
|
||||||
datastore is able to deliver that performance (e.g, by using SSDs).
|
datastore is able to deliver that performance (for example, by using SSDs).
|
||||||
|
|
||||||
|
|
||||||
Terminology
|
Terminology
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
:Tape Labels: are used to uniquely identify a tape. You would normally apply a
|
**Tape Labels:**
|
||||||
sticky paper label to the front of the cartridge. We additionally store the
|
are used to uniquely identify a tape. You would normally apply a
|
||||||
label text magnetically on the tape (first file on tape).
|
sticky paper label to the front of the cartridge. We additionally
|
||||||
|
store the label text magnetically on the tape (first file on tape).
|
||||||
|
|
||||||
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
.. _Code 39: https://en.wikipedia.org/wiki/Code_39
|
||||||
|
|
||||||
@ -109,51 +112,59 @@ Terminology
|
|||||||
|
|
||||||
.. _LTO Barcode Generator: lto-barcode/index.html
|
.. _LTO Barcode Generator: lto-barcode/index.html
|
||||||
|
|
||||||
:Barcodes: are a special form of tape labels, which are electronically
|
**Barcodes:**
|
||||||
readable. Most LTO tape robots use an 8 character string encoded as
|
are a special form of tape labels, which are electronically
|
||||||
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
readable. Most LTO tape robots use an 8 character string encoded as
|
||||||
Specification`_.
|
`Code 39`_, as defined in the `LTO Ultrium Cartridge Label
|
||||||
|
Specification`_.
|
||||||
|
|
||||||
You can either buy such barcode labels from your cartridge vendor,
|
You can either buy such barcode labels from your cartridge vendor,
|
||||||
or print them yourself. You can use our `LTO Barcode Generator`_
|
or print them yourself. You can use our `LTO Barcode Generator`_
|
||||||
app, if you would like to print them yourself.
|
app, if you would like to print them yourself.
|
||||||
|
|
||||||
.. Note:: Physical labels and the associated adhesive should have an
|
.. Note:: Physical labels and the associated adhesive should have an
|
||||||
environmental performance to match or exceed the environmental
|
environmental performance to match or exceed the environmental
|
||||||
specifications of the cartridge to which it is applied.
|
specifications of the cartridge to which it is applied.
|
||||||
|
|
||||||
:Media Pools: A media pool is a logical container for tapes. A backup
|
**Media Pools:**
|
||||||
job targets one media pool, so a job only uses tapes from that
|
A media pool is a logical container for tapes. A backup job targets
|
||||||
pool. The pool additionally defines how long a backup job can
|
one media pool, so a job only uses tapes from that pool. The pool
|
||||||
append data to tapes (allocation policy) and how long you want to
|
additionally defines how long a backup job can append data to tapes
|
||||||
keep the data (retention policy).
|
(allocation policy) and how long you want to keep the data
|
||||||
|
(retention policy).
|
||||||
|
|
||||||
:Media Set: A group of continuously written tapes (all from the same
|
**Media Set:**
|
||||||
media pool).
|
A group of continuously written tapes (all from the same media pool).
|
||||||
|
|
||||||
:Tape drive: The device used to read and write data to the tape. There
|
**Tape drive:**
|
||||||
are standalone drives, but drives are usually shipped within tape libraries.
|
The device used to read and write data to the tape. There are
|
||||||
|
standalone drives, but drives are usually shipped within tape
|
||||||
|
libraries.
|
||||||
|
|
||||||
:Tape changer: A device which can change the tapes inside a tape drive
|
**Tape changer:**
|
||||||
(tape robot). They are usually part of a tape library.
|
A device which can change the tapes inside a tape drive (tape
|
||||||
|
robot). They are usually part of a tape library.
|
||||||
|
|
||||||
.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library
|
.. _Tape Library: https://en.wikipedia.org/wiki/Tape_library
|
||||||
|
|
||||||
:`Tape library`_: A storage device that contains one or more tape drives,
|
`Tape library`_:
|
||||||
a number of slots to hold tape cartridges, a barcode reader to
|
A storage device that contains one or more tape drives, a number of
|
||||||
identify tape cartridges, and an automated method for loading tapes
|
slots to hold tape cartridges, a barcode reader to identify tape
|
||||||
(a robot).
|
cartridges, and an automated method for loading tapes (a robot).
|
||||||
|
|
||||||
This is also commonly known as an 'autoloader', 'tape robot' or 'tape jukebox'.
|
This is also commonly known as an 'autoloader', 'tape robot' or
|
||||||
|
'tape jukebox'.
|
||||||
|
|
||||||
:Inventory: The inventory stores the list of known tapes (with
|
**Inventory:**
|
||||||
additional status information).
|
The inventory stores the list of known tapes (with additional status
|
||||||
|
information).
|
||||||
|
|
||||||
:Catalog: A media catalog stores information about the media content.
|
**Catalog:**
|
||||||
|
A media catalog stores information about the media content.
|
||||||
|
|
||||||
|
|
||||||
Tape Quick Start
|
Tape Quick Start
|
||||||
---------------
|
----------------
|
||||||
|
|
||||||
1. Configure your tape hardware (drives and changers)
|
1. Configure your tape hardware (drives and changers)
|
||||||
|
|
||||||
@ -176,8 +187,15 @@ same configuration.
|
|||||||
Tape changers
|
Tape changers
|
||||||
~~~~~~~~~~~~~
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
Tape changers (robots) are part of a `Tape Library`_. You can skip
|
.. image:: images/screenshots/pbs-gui-tape-changers.png
|
||||||
this step if you are using a standalone drive.
|
:align: right
|
||||||
|
:alt: Tape Backup: Tape Changers
|
||||||
|
|
||||||
|
Tape changers (robots) are part of a `Tape Library`_. They contain a number of
|
||||||
|
slots to hold tape cartridges, a barcode reader to identify tape cartridges and
|
||||||
|
an automated method for loading tapes.
|
||||||
|
|
||||||
|
You can skip this step if you are using a standalone drive.
|
||||||
|
|
||||||
Linux is able to auto detect these devices, and you can get a list
|
Linux is able to auto detect these devices, and you can get a list
|
||||||
of available devices using:
|
of available devices using:
|
||||||
@ -204,6 +222,13 @@ Where ``sl3`` is an arbitrary name you can choose.
|
|||||||
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||||
different device after reboot, and that is not what you want.
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-changers-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Add a new tape changer
|
||||||
|
|
||||||
|
This operation can also be carried out from the GUI, by navigating to the
|
||||||
|
**Changers** tab of **Tape Backup** and clicking **Add**.
|
||||||
|
|
||||||
You can display the final configuration with:
|
You can display the final configuration with:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -217,7 +242,8 @@ You can display the final configuration with:
|
|||||||
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
│ path │ /dev/tape/by-id/scsi-CC2C52 │
|
||||||
└──────┴─────────────────────────────┘
|
└──────┴─────────────────────────────┘
|
||||||
|
|
||||||
Or simply list all configured changer devices:
|
Or simply list all configured changer devices (as seen in the **Changers** tab
|
||||||
|
of the GUI):
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -228,7 +254,7 @@ Or simply list all configured changer devices:
|
|||||||
│ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
│ sl3 │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
|
||||||
└──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘
|
└──────┴─────────────────────────────┴─────────┴──────────────┴────────────┘
|
||||||
|
|
||||||
The Vendor, Model and Serial number are auto detected, but only shown
|
The Vendor, Model and Serial number are auto-detected, but only shown
|
||||||
if the device is online.
|
if the device is online.
|
||||||
|
|
||||||
To test your setup, please query the status of the changer device with:
|
To test your setup, please query the status of the changer device with:
|
||||||
@ -261,12 +287,12 @@ It's worth noting that some of the smaller tape libraries don't have
|
|||||||
such slots. While they have something called a "Mail Slot", that slot
|
such slots. While they have something called a "Mail Slot", that slot
|
||||||
is just a way to grab the tape from the gripper. They are unable
|
is just a way to grab the tape from the gripper. They are unable
|
||||||
to hold media while the robot does other things. They also do not
|
to hold media while the robot does other things. They also do not
|
||||||
expose that "Mail Slot" over the SCSI interface, so you wont see them in
|
expose that "Mail Slot" over the SCSI interface, so you won't see them in
|
||||||
the status output.
|
the status output.
|
||||||
|
|
||||||
As a workaround, you can mark some of the normal slots as export
|
As a workaround, you can mark some of the normal slots as export
|
||||||
slot. The software treats those slots like real ``import-export``
|
slot. The software treats those slots like real ``import-export``
|
||||||
slots, and the media inside those slots is considered to be 'offline'
|
slots, and the media inside those slots are considered to be 'offline'
|
||||||
(not available for backup):
|
(not available for backup):
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
@ -302,6 +328,10 @@ the status output:
|
|||||||
Tape drives
|
Tape drives
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-drives.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Drive list
|
||||||
|
|
||||||
Linux is able to auto detect tape drives, and you can get a list
|
Linux is able to auto detect tape drives, and you can get a list
|
||||||
of available tape drives using:
|
of available tape drives using:
|
||||||
|
|
||||||
@ -311,18 +341,23 @@ of available tape drives using:
|
|||||||
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
┌────────────────────────────────┬────────┬─────────────┬────────┐
|
||||||
│ path │ vendor │ model │ serial │
|
│ path │ vendor │ model │ serial │
|
||||||
╞════════════════════════════════╪════════╪═════════════╪════════╡
|
╞════════════════════════════════╪════════╪═════════════╪════════╡
|
||||||
│ /dev/tape/by-id/scsi-12345-nst │ IBM │ ULT3580-TD4 │ 12345 │
|
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
|
||||||
└────────────────────────────────┴────────┴─────────────┴────────┘
|
└────────────────────────────────┴────────┴─────────────┴────────┘
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-drives-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Add a tape drive
|
||||||
|
|
||||||
In order to use that drive with Proxmox, you need to create a
|
In order to use that drive with Proxmox, you need to create a
|
||||||
configuration entry:
|
configuration entry. This can be done through **Tape Backup -> Drives** in the
|
||||||
|
GUI or by using the command below:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-nst
|
# proxmox-tape drive create mydrive --path /dev/tape/by-id/scsi-12345-sg
|
||||||
|
|
||||||
.. Note:: Please use the persistent device path names from inside
|
.. Note:: Please use the persistent device path names from inside
|
||||||
``/dev/tape/by-id/``. Names like ``/dev/nst0`` may point to a
|
``/dev/tape/by-id/``. Names like ``/dev/sg0`` may point to a
|
||||||
different device after reboot, and that is not what you want.
|
different device after reboot, and that is not what you want.
|
||||||
|
|
||||||
If you have a tape library, you also need to set the associated
|
If you have a tape library, you also need to set the associated
|
||||||
@ -346,7 +381,7 @@ You can display the final configuration with:
|
|||||||
╞═════════╪════════════════════════════════╡
|
╞═════════╪════════════════════════════════╡
|
||||||
│ name │ mydrive │
|
│ name │ mydrive │
|
||||||
├─────────┼────────────────────────────────┤
|
├─────────┼────────────────────────────────┤
|
||||||
│ path │ /dev/tape/by-id/scsi-12345-nst │
|
│ path │ /dev/tape/by-id/scsi-12345-sg │
|
||||||
├─────────┼────────────────────────────────┤
|
├─────────┼────────────────────────────────┤
|
||||||
│ changer │ sl3 │
|
│ changer │ sl3 │
|
||||||
└─────────┴────────────────────────────────┘
|
└─────────┴────────────────────────────────┘
|
||||||
@ -362,10 +397,10 @@ To list all configured drives use:
|
|||||||
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
┌──────────┬────────────────────────────────┬─────────┬────────┬─────────────┬────────┐
|
||||||
│ name │ path │ changer │ vendor │ model │ serial │
|
│ name │ path │ changer │ vendor │ model │ serial │
|
||||||
╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
|
╞══════════╪════════════════════════════════╪═════════╪════════╪═════════════╪════════╡
|
||||||
│ mydrive │ /dev/tape/by-id/scsi-12345-nst │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
|
│ mydrive │ /dev/tape/by-id/scsi-12345-sg │ sl3 │ IBM │ ULT3580-TD4 │ 12345 │
|
||||||
└──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘
|
└──────────┴────────────────────────────────┴─────────┴────────┴─────────────┴────────┘
|
||||||
|
|
||||||
The Vendor, Model and Serial number are auto detected, but only shown
|
The Vendor, Model and Serial number are auto detected and only shown
|
||||||
if the device is online.
|
if the device is online.
|
||||||
|
|
||||||
For testing, you can simply query the drive status with:
|
For testing, you can simply query the drive status with:
|
||||||
@ -373,13 +408,35 @@ For testing, you can simply query the drive status with:
|
|||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-tape status --drive mydrive
|
# proxmox-tape status --drive mydrive
|
||||||
┌───────────┬────────────────────────┐
|
┌────────────────┬──────────────────────────┐
|
||||||
│ Name │ Value │
|
│ Name │ Value │
|
||||||
╞═══════════╪════════════════════════╡
|
╞════════════════╪══════════════════════════╡
|
||||||
│ blocksize │ 0 │
|
│ blocksize │ 0 │
|
||||||
├───────────┼────────────────────────┤
|
├────────────────┼──────────────────────────┤
|
||||||
│ status │ DRIVE_OPEN | IM_REP_EN │
|
│ density │ LTO4 │
|
||||||
└───────────┴────────────────────────┘
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ compression │ 1 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ buffer-mode │ 1 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ alert-flags │ (empty) │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ file-number │ 0 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ block-number │ 0 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ manufactured │ Fri Dec 13 01:00:00 2019 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ bytes-written │ 501.80 GiB │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ bytes-read │ 4.00 MiB │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ medium-passes │ 20 │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ medium-wearout │ 0.12% │
|
||||||
|
├────────────────┼──────────────────────────┤
|
||||||
|
│ volume-mounts │ 2 │
|
||||||
|
└────────────────┴──────────────────────────┘
|
||||||
|
|
||||||
.. NOTE:: Blocksize should always be 0 (variable block size
|
.. NOTE:: Blocksize should always be 0 (variable block size
|
||||||
mode). This is the default anyway.
|
mode). This is the default anyway.
|
||||||
@ -390,8 +447,12 @@ For testing, you can simply query the drive status with:
|
|||||||
Media Pools
|
Media Pools
|
||||||
~~~~~~~~~~~
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-pools.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Media Pools
|
||||||
|
|
||||||
A media pool is a logical container for tapes. A backup job targets
|
A media pool is a logical container for tapes. A backup job targets
|
||||||
one media pool, so a job only uses tapes from that pool.
|
a single media pool, so a job only uses tapes from that pool.
|
||||||
|
|
||||||
.. topic:: Media Set
|
.. topic:: Media Set
|
||||||
|
|
||||||
@ -411,7 +472,7 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
The pool additionally defines how long backup jobs can append data
|
The pool additionally defines how long backup jobs can append data
|
||||||
to a media set. The following settings are possible:
|
to a media set. The following settings are possible:
|
||||||
|
|
||||||
- Try to use the current media set.
|
- Try to use the current media set (``continue``).
|
||||||
|
|
||||||
This setting produces one large media set. While this is very
|
This setting produces one large media set. While this is very
|
||||||
space efficient (deduplication, no unused space), it can lead to
|
space efficient (deduplication, no unused space), it can lead to
|
||||||
@ -433,7 +494,7 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
.. NOTE:: Retention period starts with the existence of a newer
|
.. NOTE:: Retention period starts with the existence of a newer
|
||||||
media set.
|
media set.
|
||||||
|
|
||||||
- Always create a new media set.
|
- Always create a new media set (``always``).
|
||||||
|
|
||||||
With this setting, each backup job creates a new media set. This
|
With this setting, each backup job creates a new media set. This
|
||||||
is less space efficient, because the media from the last set
|
is less space efficient, because the media from the last set
|
||||||
@ -499,7 +560,7 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
will be double encrypted.
|
will be double encrypted.
|
||||||
|
|
||||||
The password protected key is stored on each medium, so that it is
|
The password protected key is stored on each medium, so that it is
|
||||||
possible to `restore the key <tape_restore_encryption_key_>`_ using
|
possible to `restore the key <tape_restore_encryption_key_>`_ using
|
||||||
the password. Please make sure to remember the password, in case
|
the password. Please make sure to remember the password, in case
|
||||||
you need to restore the key.
|
you need to restore the key.
|
||||||
|
|
||||||
@ -510,8 +571,12 @@ one media pool, so a job only uses tapes from that pool.
|
|||||||
if the sources are from different namespaces with conflicting names
|
if the sources are from different namespaces with conflicting names
|
||||||
(for example, if the sources are from different Proxmox VE clusters).
|
(for example, if the sources are from different Proxmox VE clusters).
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-pools-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Add a media pool
|
||||||
|
|
||||||
The following command creates a new media pool:
|
To create a new media pool, add one from **Tape Backup -> Media Pools** in the
|
||||||
|
GUI, or enter the following command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -520,7 +585,7 @@ The following command creates a new media pool:
|
|||||||
# proxmox-tape pool create daily --drive mydrive
|
# proxmox-tape pool create daily --drive mydrive
|
||||||
|
|
||||||
|
|
||||||
Additional option can be set later, using the update command:
|
Additional options can be set later, using the update command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -543,6 +608,10 @@ To list all configured pools use:
|
|||||||
Tape Backup Jobs
|
Tape Backup Jobs
|
||||||
~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-backup-jobs.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Tape Backup Jobs
|
||||||
|
|
||||||
To automate tape backup, you can configure tape backup jobs which
|
To automate tape backup, you can configure tape backup jobs which
|
||||||
write datastore content to a media pool, based on a specific time schedule.
|
write datastore content to a media pool, based on a specific time schedule.
|
||||||
The required settings are:
|
The required settings are:
|
||||||
@ -618,6 +687,14 @@ To remove a job, please use:
|
|||||||
|
|
||||||
# proxmox-tape backup-job remove job2
|
# proxmox-tape backup-job remove job2
|
||||||
|
|
||||||
|
.. image:: images/screenshots/pbs-gui-tape-backup-jobs-add.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Add a backup job
|
||||||
|
|
||||||
|
This same functionality also exists in the GUI, under the **Backup Jobs** tab of
|
||||||
|
**Tape Backup**, where *Local Datastore* relates to the datastore you want to
|
||||||
|
backup and *Media Pool* is the pool to back up to.
|
||||||
|
|
||||||
|
|
||||||
Administration
|
Administration
|
||||||
--------------
|
--------------
|
||||||
@ -633,7 +710,7 @@ variable:
|
|||||||
|
|
||||||
You can then omit the ``--drive`` parameter from the command. If the
|
You can then omit the ``--drive`` parameter from the command. If the
|
||||||
drive has an associated changer device, you may also omit the changer
|
drive has an associated changer device, you may also omit the changer
|
||||||
parameter from commands that needs a changer device, for example:
|
parameter from commands that need a changer device, for example:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -707,7 +784,7 @@ can then label all unlabeled tapes with a single command:
|
|||||||
Run Tape Backups
|
Run Tape Backups
|
||||||
~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
To manually run a backup job use:
|
To manually run a backup job click *Run Now* in the GUI or use the command:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -768,11 +845,29 @@ Update Inventory
|
|||||||
Restore Catalog
|
Restore Catalog
|
||||||
~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To restore a catalog from an existing tape, just insert the tape into the drive
|
||||||
|
and execute:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-tape catalog
|
||||||
|
|
||||||
|
|
||||||
|
You can restore from a tape even without an existing catalog, but only the
|
||||||
|
whole media set. If you do this, the catalog will be automatically created.
|
||||||
|
|
||||||
|
|
||||||
Encryption Key Management
|
Encryption Key Management
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
Creating a new encryption key:
|
.. image:: images/screenshots/pbs-gui-tape-crypt-keys.png
|
||||||
|
:align: right
|
||||||
|
:alt: Tape Backup: Encryption Keys
|
||||||
|
|
||||||
|
Proxmox Backup Server also provides an interface for handling encryption keys on
|
||||||
|
the backup server. Encryption keys can be managed from the **Tape Backup ->
|
||||||
|
Encryption Keys** section of the GUI or through the ``proxmox-tape key`` command
|
||||||
|
line tool. To create a new encryption key from the command line:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
@ -883,78 +978,3 @@ This command does the following:
|
|||||||
- run drive cleaning operation
|
- run drive cleaning operation
|
||||||
|
|
||||||
- unload the cleaning tape (to slot 3)
|
- unload the cleaning tape (to slot 3)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Configuration Files
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
``media-pool.cfg``
|
|
||||||
~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
File Format
|
|
||||||
^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/media-pool/format.rst
|
|
||||||
|
|
||||||
|
|
||||||
Options
|
|
||||||
^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/media-pool/config.rst
|
|
||||||
|
|
||||||
|
|
||||||
``tape.cfg``
|
|
||||||
~~~~~~~~~~~~
|
|
||||||
|
|
||||||
File Format
|
|
||||||
^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/tape/format.rst
|
|
||||||
|
|
||||||
|
|
||||||
Options
|
|
||||||
^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/tape/config.rst
|
|
||||||
|
|
||||||
|
|
||||||
``tape-job.cfg``
|
|
||||||
~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
File Format
|
|
||||||
^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/tape-job/format.rst
|
|
||||||
|
|
||||||
|
|
||||||
Options
|
|
||||||
^^^^^^^
|
|
||||||
|
|
||||||
.. include:: config/tape-job/config.rst
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Command Syntax
|
|
||||||
--------------
|
|
||||||
|
|
||||||
``proxmox-tape``
|
|
||||||
----------------
|
|
||||||
|
|
||||||
.. include:: proxmox-tape/synopsis.rst
|
|
||||||
|
|
||||||
|
|
||||||
``pmt``
|
|
||||||
-------
|
|
||||||
|
|
||||||
.. include:: pmt/options.rst
|
|
||||||
|
|
||||||
....
|
|
||||||
|
|
||||||
.. include:: pmt/synopsis.rst
|
|
||||||
|
|
||||||
|
|
||||||
``pmtx``
|
|
||||||
--------
|
|
||||||
|
|
||||||
.. include:: pmtx/synopsis.rst
|
|
||||||
|
@ -100,7 +100,7 @@ can be encrypted, and they are handled in a slightly different manner than
|
|||||||
normal chunks.
|
normal chunks.
|
||||||
|
|
||||||
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
The hashes of encrypted chunks are calculated not with the actual (encrypted)
|
||||||
chunk content, but with the plaintext content concatenated with the encryption
|
chunk content, but with the plain-text content concatenated with the encryption
|
||||||
key. This way, two chunks of the same data encrypted with different keys
|
key. This way, two chunks of the same data encrypted with different keys
|
||||||
generate two different checksums and no collisions occur for multiple
|
generate two different checksums and no collisions occur for multiple
|
||||||
encryption keys.
|
encryption keys.
|
||||||
@ -138,7 +138,7 @@ will see that the probability of a collision in that scenario is:
|
|||||||
|
|
||||||
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
For context, in a lottery game of guessing 6 out of 45, the chance to correctly
|
||||||
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
guess all 6 numbers is only :math:`1.2277 * 10^{-7}`, that means the chance of
|
||||||
collission is about the same as winning 13 such lotto games *in a row*.
|
a collision is about the same as winning 13 such lotto games *in a row*.
|
||||||
|
|
||||||
In conclusion, it is extremely unlikely that such a collision would occur by
|
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||||
accident in a normal datastore.
|
accident in a normal datastore.
|
||||||
@ -164,3 +164,66 @@ Verification of encrypted chunks
|
|||||||
For encrypted chunks, only the checksum of the original (plaintext) data is
|
For encrypted chunks, only the checksum of the original (plaintext) data is
|
||||||
available, making it impossible for the server (without the encryption key), to
|
available, making it impossible for the server (without the encryption key), to
|
||||||
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
verify its content against it. Instead only the CRC-32 checksum gets checked.
|
||||||
|
|
||||||
|
Troubleshooting
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Index files (.fidx, .didx) contain information about how to rebuild a file, more
|
||||||
|
precisely, they contain an ordered list of references to the chunks the original
|
||||||
|
file was split up in. If there is something wrong with a snapshot it might be
|
||||||
|
useful to find out which chunks are referenced in this specific snapshot, and
|
||||||
|
check whether all of them are present and intact. The command for getting the
|
||||||
|
list of referenced chunks could look something like this:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect file drive-scsi0.img.fidx
|
||||||
|
|
||||||
|
The same command can be used to look at a .blob file; without ``--decode`` just
|
||||||
|
the size and the encryption type, if any, is printed. If ``--decode`` is set the
|
||||||
|
blob file is decoded into the specified file ('-' will decode it directly into
|
||||||
|
stdout).
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect file qemu-server.conf.blob --decode -
|
||||||
|
|
||||||
|
would print the decoded contents of `qemu-server.conf.blob`. If the file you're
|
||||||
|
trying to inspect is encrypted, a path to the keyfile has to be provided using
|
||||||
|
``--keyfile``.
|
||||||
|
|
||||||
|
Checking in which index files a specific chunk file is referenced can be done
|
||||||
|
with:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect chunk b531d3ffc9bd7c65748a61198c060678326a431db7eded874c327b7986e595e0 --reference-filter /path/in/a/datastore/directory
|
||||||
|
|
||||||
|
Here ``--reference-filter`` specifies where index files should be searched; this
|
||||||
|
can be an arbitrary path. If, for some reason, the filename of the chunk was
|
||||||
|
changed you can explicitly specify the digest using ``--digest``, by default the
|
||||||
|
chunk filename is used as the digest to look for. Specifying no
|
||||||
|
``--reference-filter`` will just print the CRC and encryption status of the
|
||||||
|
chunk. You can also decode chunks, to do so ``--decode`` has to be set. If the
|
||||||
|
chunk is encrypted a ``--keyfile`` has to be provided for decoding.
|
||||||
|
|
||||||
|
Restore without a running PBS
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
It is possible to restore specific files of snapshots without a running PBS using
|
||||||
|
the `recover` sub-command, provided you have access to the intact index and
|
||||||
|
chunk files. Note that you also need the corresponding key file if the backup
|
||||||
|
was encrypted.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug recover index drive-scsi0.img.fidx /path/to/.chunks
|
||||||
|
|
||||||
|
In above example the `/path/to/.chunks` argument is the path to the directory
|
||||||
|
that contains the chunks, and `drive-scsi0.img.fidx` is the index-file
|
||||||
|
of the file you'd like to restore. Both paths can be absolute or relative. With
|
||||||
|
``--skip-crc`` it is possible to disable the crc checks of the chunks, this will
|
||||||
|
speed up the process slightly and allows for trying to restore (partially)
|
||||||
|
corrupt chunks. It's recommended to always try without the skip-CRC option
|
||||||
|
first.
|
||||||
|
|
||||||
|
@ -360,7 +360,9 @@ WebAuthn
|
|||||||
For WebAuthn to work, you need to have two things:
|
For WebAuthn to work, you need to have two things:
|
||||||
|
|
||||||
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
* a trusted HTTPS certificate (for example, by using `Let's Encrypt
|
||||||
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_)
|
<https://pbs.proxmox.com/wiki/index.php/HTTPS_Certificate_Configuration>`_).
|
||||||
|
While it probably works with an untrusted certificate, some browsers may warn
|
||||||
|
or refuse WebAuthn operations if it is not trusted.
|
||||||
|
|
||||||
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
* setup the WebAuthn configuration (see *Configuration -> Authentication* in the
|
||||||
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
Proxmox Backup Server web-interface). This can be auto-filled in most setups.
|
||||||
|
@ -1 +1 @@
|
|||||||
deb https://enterprise.proxmox.com/debian/pbs buster pbs-enterprise
|
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
|
||||||
|
@ -2,8 +2,8 @@ use std::io::Write;
|
|||||||
|
|
||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Authid;
|
use pbs_api_types::Authid;
|
||||||
use proxmox_backup::client::{HttpClient, HttpClientOptions, BackupReader};
|
use pbs_client::{HttpClient, HttpClientOptions, BackupReader};
|
||||||
|
|
||||||
pub struct DummyWriter {
|
pub struct DummyWriter {
|
||||||
bytes: usize,
|
bytes: usize,
|
||||||
@ -59,7 +59,7 @@ async fn run() -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
if let Err(err) = pbs_runtime::main(run()) {
|
||||||
eprintln!("ERROR: {}", err);
|
eprintln!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
println!("DONE");
|
println!("DONE");
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -69,7 +69,7 @@ fn send_request(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -6,10 +6,10 @@ use hyper::{Body, Request, Response};
|
|||||||
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
use proxmox_backup::configdir;
|
use pbs_buildcfg::configdir;
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -5,7 +5,7 @@ use hyper::{Body, Request, Response};
|
|||||||
use tokio::net::{TcpListener, TcpStream};
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
proxmox_backup::tools::runtime::main(run())
|
pbs_runtime::main(run())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
|
@ -5,7 +5,7 @@ extern crate proxmox_backup;
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
use std::io::{Read, Write};
|
use std::io::{Read, Write};
|
||||||
|
|
||||||
use proxmox_backup::backup::*;
|
use pbs_datastore::Chunker;
|
||||||
|
|
||||||
struct ChunkWriter {
|
struct ChunkWriter {
|
||||||
chunker: Chunker,
|
chunker: Chunker,
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
extern crate proxmox_backup;
|
extern crate proxmox_backup;
|
||||||
|
|
||||||
//use proxmox_backup::backup::chunker::*;
|
use pbs_datastore::Chunker;
|
||||||
use proxmox_backup::backup::*;
|
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ use futures::*;
|
|||||||
|
|
||||||
extern crate proxmox_backup;
|
extern crate proxmox_backup;
|
||||||
|
|
||||||
use proxmox_backup::backup::*;
|
use pbs_client::ChunkStream;
|
||||||
|
|
||||||
// Test Chunker with real data read from a file.
|
// Test Chunker with real data read from a file.
|
||||||
//
|
//
|
||||||
@ -13,7 +13,7 @@ use proxmox_backup::backup::*;
|
|||||||
// Note: I can currently get about 830MB/s
|
// Note: I can currently get about 830MB/s
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
if let Err(err) = proxmox_backup::tools::runtime::main(run()) {
|
if let Err(err) = pbs_runtime::main(run()) {
|
||||||
panic!("ERROR: {}", err);
|
panic!("ERROR: {}", err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use anyhow::{Error};
|
use anyhow::{Error};
|
||||||
|
|
||||||
use proxmox_backup::api2::types::Authid;
|
use pbs_client::{HttpClient, HttpClientOptions, BackupWriter};
|
||||||
use proxmox_backup::client::*;
|
use pbs_api_types::Authid;
|
||||||
|
|
||||||
async fn upload_speed() -> Result<f64, Error> {
|
async fn upload_speed() -> Result<f64, Error> {
|
||||||
|
|
||||||
@ -27,7 +27,7 @@ async fn upload_speed() -> Result<f64, Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
match proxmox_backup::tools::runtime::main(upload_speed()) {
|
match pbs_runtime::main(upload_speed()) {
|
||||||
Ok(mbs) => {
|
Ok(mbs) => {
|
||||||
println!("average upload speed: {} MB/s", mbs);
|
println!("average upload speed: {} MB/s", mbs);
|
||||||
}
|
}
|
||||||
|
20
pbs-api-types/Cargo.toml
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-api-types"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "general API type helpers for PBS"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
lazy_static = "1.4"
|
||||||
|
libc = "0.2"
|
||||||
|
nix = "0.19.1"
|
||||||
|
openssl = "0.10"
|
||||||
|
regex = "1.2"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
|
||||||
|
proxmox = { version = "0.13.3", default-features = false, features = [ "api-macro" ] }
|
||||||
|
|
||||||
|
proxmox-systemd = { path = "../proxmox-systemd" }
|
||||||
|
pbs-tools = { path = "../pbs-tools" }
|
284
pbs-api-types/src/acl.rs
Normal file
@ -0,0 +1,284 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde::de::{value, IntoDeserializer};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||||
|
};
|
||||||
|
use proxmox::{constnamedbitmap, const_regex};
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
|
||||||
|
}
|
||||||
|
|
||||||
|
// define Privilege bitfield
|
||||||
|
|
||||||
|
constnamedbitmap! {
|
||||||
|
/// Contains a list of privilege name to privilege value mappings.
|
||||||
|
///
|
||||||
|
/// The names are used when displaying/persisting privileges anywhere, the values are used to
|
||||||
|
/// allow easy matching of privileges as bitflags.
|
||||||
|
PRIVILEGES: u64 => {
|
||||||
|
/// Sys.Audit allows knowing about the system and its status
|
||||||
|
PRIV_SYS_AUDIT("Sys.Audit");
|
||||||
|
/// Sys.Modify allows modifying system-level configuration
|
||||||
|
PRIV_SYS_MODIFY("Sys.Modify");
|
||||||
|
/// Sys.PowerManagement allows to poweroff/reboot/.. the system
|
||||||
|
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
||||||
|
|
||||||
|
/// Datastore.Audit allows knowing about a datastore,
|
||||||
|
/// including reading the configuration entry and listing its contents
|
||||||
|
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
||||||
|
/// Datastore.Allocate allows creating or deleting datastores
|
||||||
|
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
||||||
|
/// Datastore.Modify allows modifying a datastore and its contents
|
||||||
|
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
||||||
|
/// Datastore.Read allows reading arbitrary backup contents
|
||||||
|
PRIV_DATASTORE_READ("Datastore.Read");
|
||||||
|
/// Allows verifying a datastore
|
||||||
|
PRIV_DATASTORE_VERIFY("Datastore.Verify");
|
||||||
|
|
||||||
|
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
|
||||||
|
/// but also requires backup ownership
|
||||||
|
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
||||||
|
/// Datastore.Prune allows deleting snapshots,
|
||||||
|
/// but also requires backup ownership
|
||||||
|
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
||||||
|
|
||||||
|
/// Permissions.Modify allows modifying ACLs
|
||||||
|
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
||||||
|
|
||||||
|
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
|
||||||
|
PRIV_REMOTE_AUDIT("Remote.Audit");
|
||||||
|
/// Remote.Modify allows modifying remote.cfg
|
||||||
|
PRIV_REMOTE_MODIFY("Remote.Modify");
|
||||||
|
/// Remote.Read allows reading data from a configured `Remote`
|
||||||
|
PRIV_REMOTE_READ("Remote.Read");
|
||||||
|
|
||||||
|
/// Sys.Console allows access to the system's console
|
||||||
|
PRIV_SYS_CONSOLE("Sys.Console");
|
||||||
|
|
||||||
|
/// Tape.Audit allows reading tape backup configuration and status
|
||||||
|
PRIV_TAPE_AUDIT("Tape.Audit");
|
||||||
|
/// Tape.Modify allows modifying tape backup configuration
|
||||||
|
PRIV_TAPE_MODIFY("Tape.Modify");
|
||||||
|
/// Tape.Write allows writing tape media
|
||||||
|
PRIV_TAPE_WRITE("Tape.Write");
|
||||||
|
/// Tape.Read allows reading tape backup configuration and media contents
|
||||||
|
PRIV_TAPE_READ("Tape.Read");
|
||||||
|
|
||||||
|
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
|
||||||
|
PRIV_REALM_ALLOCATE("Realm.Allocate");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Admin always has all privileges. It can do everything except a few actions
|
||||||
|
/// which are limited to the `root@pam` superuser
|
||||||
|
pub const ROLE_ADMIN: u64 = std::u64::MAX;
|
||||||
|
|
||||||
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
|
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Audit can view configuration and status information, but not modify it.
|
||||||
|
pub const ROLE_AUDIT: u64 = 0
|
||||||
|
| PRIV_SYS_AUDIT
|
||||||
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Admin can do anything on the datastore.
|
||||||
|
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT
|
||||||
|
| PRIV_DATASTORE_MODIFY
|
||||||
|
| PRIV_DATASTORE_READ
|
||||||
|
| PRIV_DATASTORE_VERIFY
|
||||||
|
| PRIV_DATASTORE_BACKUP
|
||||||
|
| PRIV_DATASTORE_PRUNE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Reader can read/verify datastore content and do restore
|
||||||
|
pub const ROLE_DATASTORE_READER: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT
|
||||||
|
| PRIV_DATASTORE_VERIFY
|
||||||
|
| PRIV_DATASTORE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Backup can do backup and restore, but no prune.
|
||||||
|
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
||||||
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.PowerUser can do backup, restore, and prune.
|
||||||
|
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
||||||
|
| PRIV_DATASTORE_PRUNE
|
||||||
|
| PRIV_DATASTORE_BACKUP;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Datastore.Audit can audit the datastore.
|
||||||
|
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
||||||
|
| PRIV_DATASTORE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.Audit can audit the remote
|
||||||
|
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.Admin can do anything on the remote.
|
||||||
|
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT
|
||||||
|
| PRIV_REMOTE_MODIFY
|
||||||
|
| PRIV_REMOTE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Remote.SyncOperator can do read and prune on the remote.
|
||||||
|
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
||||||
|
| PRIV_REMOTE_AUDIT
|
||||||
|
| PRIV_REMOTE_READ;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Audit can audit the tape backup configuration and media content
|
||||||
|
pub const ROLE_TAPE_AUDIT: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Admin can do anything on the tape backup
|
||||||
|
pub const ROLE_TAPE_ADMIN: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_MODIFY
|
||||||
|
| PRIV_TAPE_READ
|
||||||
|
| PRIV_TAPE_WRITE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Operator can do tape backup and restore (but no configuration changes)
|
||||||
|
pub const ROLE_TAPE_OPERATOR: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_READ
|
||||||
|
| PRIV_TAPE_WRITE;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[allow(clippy::identity_op)]
|
||||||
|
/// Tape.Reader can do read and inspect tape content
|
||||||
|
pub const ROLE_TAPE_READER: u64 = 0
|
||||||
|
| PRIV_TAPE_AUDIT
|
||||||
|
| PRIV_TAPE_READ;
|
||||||
|
|
||||||
|
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||||
|
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
type_text: "<role>",
|
||||||
|
)]
|
||||||
|
#[repr(u64)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Enum representing roles via their [PRIVILEGES] combination.
|
||||||
|
///
|
||||||
|
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
|
||||||
|
/// single, unique `u64` value that is used in this enum definition.
|
||||||
|
pub enum Role {
|
||||||
|
/// Administrator
|
||||||
|
Admin = ROLE_ADMIN,
|
||||||
|
/// Auditor
|
||||||
|
Audit = ROLE_AUDIT,
|
||||||
|
/// Disable Access
|
||||||
|
NoAccess = ROLE_NO_ACCESS,
|
||||||
|
/// Datastore Administrator
|
||||||
|
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
|
||||||
|
/// Datastore Reader (inspect datastore content and do restores)
|
||||||
|
DatastoreReader = ROLE_DATASTORE_READER,
|
||||||
|
/// Datastore Backup (backup and restore owned backups)
|
||||||
|
DatastoreBackup = ROLE_DATASTORE_BACKUP,
|
||||||
|
/// Datastore PowerUser (backup, restore and prune owned backup)
|
||||||
|
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
|
||||||
|
/// Datastore Auditor
|
||||||
|
DatastoreAudit = ROLE_DATASTORE_AUDIT,
|
||||||
|
/// Remote Auditor
|
||||||
|
RemoteAudit = ROLE_REMOTE_AUDIT,
|
||||||
|
/// Remote Administrator
|
||||||
|
RemoteAdmin = ROLE_REMOTE_ADMIN,
|
||||||
|
/// Syncronisation Opertator
|
||||||
|
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
|
||||||
|
/// Tape Auditor
|
||||||
|
TapeAudit = ROLE_TAPE_AUDIT,
|
||||||
|
/// Tape Administrator
|
||||||
|
TapeAdmin = ROLE_TAPE_ADMIN,
|
||||||
|
/// Tape Operator
|
||||||
|
TapeOperator = ROLE_TAPE_OPERATOR,
|
||||||
|
/// Tape Reader
|
||||||
|
TapeReader = ROLE_TAPE_READER,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
impl FromStr for Role {
|
||||||
|
type Err = value::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
Self::deserialize(s.into_deserializer())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ACL_PATH_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&ACL_PATH_REGEX);

/// Schema for ACL object paths (e.g. `/datastore/<name>`).
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new(
    "Access control path.")
    .format(&ACL_PATH_FORMAT)
    .min_length(1)
    .max_length(128)
    .schema();

/// Schema for the ACL `propagate` flag; permissions are inherited by default.
pub const ACL_PROPAGATE_SCHEMA: Schema = BooleanSchema::new(
    "Allow to propagate (inherit) permissions.")
    .default(true)
    .schema();

/// Schema restricting the ACL subject type to either "user" or "group".
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
    "Type of 'ugid' property.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("user", "User"),
        EnumEntry::new("group", "Group")]))
    .schema();
|
||||||
|
|
||||||
|
#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
    // Field descriptions come from the schemas referenced in `properties`
    // above; `///` docs are intentionally omitted to avoid duplicates.
    pub path: String,
    pub ugid: String,
    pub ugid_type: String, // "user" or "group" (see ACL_UGID_TYPE_SCHEMA)
    pub propagate: bool,
    pub roleid: String,
}
|
57
pbs-api-types/src/crypto.rs
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
use std::fmt::{self, Display};
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
use pbs_tools::format::{as_fingerprint, bytes_as_fingerprint};
|
||||||
|
|
||||||
|
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
    /// Don't encrypt.
    None,
    /// Encrypt.
    Encrypt,
    /// Only sign.
    SignOnly, // serialized as "sign-only" due to kebab-case renaming
}
|
||||||
|
|
||||||
|
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
    // serialized/deserialized as a colon-separated hex string,
    // see `bytes_as_fingerprint`
    #[serde(with = "bytes_as_fingerprint")]
    bytes: [u8; 32],
}

impl Fingerprint {
    /// Wrap a raw 32-byte digest.
    pub fn new(bytes: [u8; 32]) -> Self {
        Self { bytes }
    }
    /// Borrow the raw digest bytes.
    pub fn bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
}

/// Display as short key ID
impl Display for Fingerprint {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // only the first 8 bytes are rendered (colon-separated hex)
        write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
    }
}
|
||||||
|
|
||||||
|
impl std::str::FromStr for Fingerprint {
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Error> {
|
||||||
|
let mut tmp = s.to_string();
|
||||||
|
tmp.retain(|c| c != ':');
|
||||||
|
let bytes = proxmox::tools::hex_to_digest(&tmp)?;
|
||||||
|
Ok(Fingerprint::new(bytes))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
622
pbs-api-types/src/datastore.rs
Normal file
@ -0,0 +1,622 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType, Schema,
|
||||||
|
StringSchema, Updater,
|
||||||
|
};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
PROXMOX_SAFE_ID_FORMAT, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, CryptMode, UPID,
|
||||||
|
Fingerprint, Userid, Authid,
|
||||||
|
GC_SCHEDULE_SCHEMA, DATASTORE_NOTIFY_STRING_SCHEMA, PRUNE_SCHEDULE_SCHEMA,
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");

    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");

    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");

    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");

    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");

    // Matches `(<source>=)?<target>` datastore mappings.
    // Fixed `(:?` -> `(?:`: the original made the colon a literal optional
    // character inside a capturing group instead of opening a non-capturing
    // group, so strings like `:a=b` were accepted.
    // NOTE(review): unlike the regexes above, this one is not anchored with
    // `^`/`$` — confirm whether substring matches are intended.
    pub DATASTORE_MAP_REGEX = concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
}
|
||||||
|
|
||||||
|
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

/// Schema for filesystem directory names (up to 4096 bytes).
pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
    .min_length(1)
    .max_length(4096)
    .schema();

/// Schema for archive names inside a snapshot (safe-ID alphabet).
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();

pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);

/// Schema for backup IDs (e.g. a VMID or hostname).
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
    .format(&BACKUP_ID_FORMAT)
    .schema();

/// Schema for the backup type; restricted to "vm", "ct" and "host".
pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("vm", "Virtual Machine Backup"),
        EnumEntry::new("ct", "Container Backup"),
        EnumEntry::new("host", "Host Backup"),
    ]))
    .schema();

/// Schema for snapshot timestamps; the minimum predates the project,
/// ruling out obviously bogus epochs.
pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)")
    .minimum(1_547_797_308)
    .schema();

/// Schema for datastore names (3–32 characters of the safe-ID alphabet).
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

/// Schema for SHA256 chunk digests in hex notation.
pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
    .format(&CHUNK_DIGEST_FORMAT)
    .schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
|
||||||
|
.format(&DATASTORE_MAP_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(65)
|
||||||
|
.type_text("(<source>=)?<target>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||||
|
"Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A list of Datastore mappings (or single datastore), comma separated. \
|
||||||
|
For example 'a=b,e' maps the source datastore 'a' to target 'b and \
|
||||||
|
all other sources to the default 'e'. If no default is given, only the \
|
||||||
|
specified sources are mapped.")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
// Schemas for the individual `keep-*` prune options; all require at least 1.
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "keep-last": {
            schema: PRUNE_SCHEMA_KEEP_LAST,
            optional: true,
        },
        "keep-hourly": {
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
            optional: true,
        },
        "keep-daily": {
            schema: PRUNE_SCHEMA_KEEP_DAILY,
            optional: true,
        },
        "keep-weekly": {
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
            optional: true,
        },
        "keep-monthly": {
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
            optional: true,
        },
        "keep-yearly": {
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
// All fields are optional; unset options do not restrict pruning.
pub struct PruneOptions {
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_yearly: Option<u64>,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
        },
        "keep-hourly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
        },
        "keep-daily": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_DAILY,
        },
        "keep-weekly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
        },
        "keep-monthly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
        },
        "keep-yearly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
    }
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    // name and path are the config section identity; they cannot be updated
    #[updater(skip)]
    pub name: String,
    #[updater(skip)]
    pub path: String,
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    // calendar-event strings, see GC_SCHEDULE_SCHEMA / PRUNE_SCHEDULE_SCHEMA
    #[serde(skip_serializing_if="Option::is_none")]
    pub gc_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub prune_schedule: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub keep_yearly: Option<u64>,
    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if="Option::is_none")]
    pub verify_new: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify_user: Option<Userid>,
    /// Send notification only for job errors
    // property string parsed against DatastoreNotify::API_SCHEMA
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify: Option<String>,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    pub store: String,
    pub comment: Option<String>,
}

#[api(
    properties: {
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}

#[api(
    properties: {
        upid: {
            type: UPID,
        },
        state: {
            type: VerifyState,
        },
    },
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification. Enum.
    pub state: VerifyState,
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "backup-type": {
            schema: BACKUP_TYPE_SCHEMA,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
        },
        "backup-time": {
            schema: BACKUP_TIME_SCHEMA,
        },
        comment: {
            schema: SINGLE_LINE_COMMENT_SCHEMA,
            optional: true,
        },
        verification: {
            type: SnapshotVerifyState,
            optional: true,
        },
        fingerprint: {
            type: String,
            optional: true,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about backup snapshot.
pub struct SnapshotListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    pub backup_time: i64,
    /// The first line from manifest "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// Fingerprint of encryption key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<Fingerprint>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
    /// The owner of the snapshots group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "backup-type": {
            schema: BACKUP_TYPE_SCHEMA,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
        },
        "last-backup": {
            schema: BACKUP_TIME_SCHEMA,
        },
        "backup-count": {
            type: Integer,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    // epoch of the most recent snapshot in the group
    pub last_backup: i64,
    /// Number of contained snapshots
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// The first line from group "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "backup-type": {
            schema: BACKUP_TYPE_SCHEMA,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
        },
        "backup-time": {
            schema: BACKUP_TIME_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
    pub backup_type: String, // enum
    pub backup_id: String,
    pub backup_time: i64,
    /// Keep snapshot
    pub keep: bool,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        ct: {
            type: TypeCounts,
            optional: true,
        },
        host: {
            type: TypeCounts,
            optional: true,
        },
        vm: {
            type: TypeCounts,
            optional: true,
        },
        other: {
            type: TypeCounts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}

#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "upid": {
            optional: true,
            type: UPID,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
    // UPID of the GC task (described via the `upid` property above)
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}
|
||||||
|
|
||||||
|
impl Default for GarbageCollectionStatus {
|
||||||
|
fn default() -> Self {
|
||||||
|
GarbageCollectionStatus {
|
||||||
|
upid: None,
|
||||||
|
index_file_count: 0,
|
||||||
|
index_data_bytes: 0,
|
||||||
|
disk_bytes: 0,
|
||||||
|
disk_chunks: 0,
|
||||||
|
removed_bytes: 0,
|
||||||
|
removed_chunks: 0,
|
||||||
|
pending_bytes: 0,
|
||||||
|
pending_chunks: 0,
|
||||||
|
removed_bad: 0,
|
||||||
|
still_bad: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "gc-status": {
            type: GarbageCollectionStatus,
            optional: true,
        },
        counts: {
            type: Counts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    #[serde(skip_serializing_if="Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
    /// Group/Snapshot counts
    #[serde(skip_serializing_if="Option::is_none")]
    pub counts: Option<Counts>,
}
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of snapshots.",
|
||||||
|
&SnapshotListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of archive files inside a backup snapshots.",
|
||||||
|
&BackupContent::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of backup groups.",
|
||||||
|
&GroupListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"Returns the list of snapshots and a flag indicating if there are kept or removed.",
|
||||||
|
&PruneListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
15
pbs-api-types/src/file_restore.rs
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
    /// VM uptime in seconds
    pub uptime: i64,
    /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
    /// not set, as then the status call will have reset the timer before returning the value
    pub timeout: i64,
}
|
392
pbs-api-types/src/jobs.rs
Normal file
@ -0,0 +1,392 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Userid, Authid, REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
    /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
    // anchored at the start only; captures the datastore prefix
    pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
    /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
    pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
}
|
||||||
|
|
||||||
|
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(32)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run sync job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run garbage collection job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run prune job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Run verify job at specified schedule.")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(proxmox_systemd::time::verify_calendar_event))
|
||||||
|
.type_text("<calendar-event>")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Delete vanished backups. This remove the local copy if the remote backup was deleted.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
    properties: {
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[derive(Serialize,Deserialize,Default)]
#[serde(rename_all="kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_run: Option<i64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_state: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_upid: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_endtime: Option<i64>,
}
|
||||||
|
|
||||||
|
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
    /// Never send notification
    Never,
    /// Send notifications for failed and successful jobs
    Always,
    /// Send notifications for failed jobs only
    Error,
}

#[api(
    properties: {
        gc: {
            type: Notify,
            optional: true,
        },
        verify: {
            type: Notify,
            optional: true,
        },
        sync: {
            type: Notify,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
    /// Garbage collection settings
    pub gc: Option<Notify>,
    /// Verify job setting
    pub verify: Option<Notify>,
    /// Sync job setting
    pub sync: Option<Notify>,
}
|
||||||
|
|
||||||
|
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Datastore notification setting")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Do not verify backups that are already verified if their verification is not outdated.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Days after that a verification becomes outdated")
|
||||||
|
.minimum(1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"ignore-verified": {
|
||||||
|
optional: true,
|
||||||
|
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
||||||
|
},
|
||||||
|
"outdated-after": {
|
||||||
|
optional: true,
|
||||||
|
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Verification Job
|
||||||
|
pub struct VerificationJobConfig {
|
||||||
|
/// unique ID to address this job
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
/// the datastore ID this verificaiton job affects
|
||||||
|
pub store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// if not set to false, check the age of the last snapshot verification to filter
|
||||||
|
/// out recent ones, depending on 'outdated_after' configuration.
|
||||||
|
pub ignore_verified: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
||||||
|
pub outdated_after: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// when to schedule this job in calendar event notation
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: VerificationJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Verification Job
|
||||||
|
pub struct VerificationJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: VerificationJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
pool: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
drive: {
|
||||||
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"eject-media": {
|
||||||
|
description: "Eject media upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"export-media-set": {
|
||||||
|
description: "Export media set upon job completion.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"latest-only": {
|
||||||
|
description: "Backup latest snapshots only.",
|
||||||
|
type: bool,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"notify-user": {
|
||||||
|
optional: true,
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Tape Backup Job Setup
|
||||||
|
pub struct TapeBackupJobSetup {
|
||||||
|
pub store: String,
|
||||||
|
pub pool: String,
|
||||||
|
pub drive: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub eject_media: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub export_media_set: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub latest_only: Option<bool>,
|
||||||
|
/// Send job email notification to this user
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub notify_user: Option<Userid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
setup: {
|
||||||
|
type: TapeBackupJobSetup,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Tape Backup Job
|
||||||
|
pub struct TapeBackupJobConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub setup: TapeBackupJobSetup,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: TapeBackupJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Tape Backup Job
|
||||||
|
pub struct TapeBackupJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: TapeBackupJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
/// Next tape used (best guess)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub next_media_label: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
id: {
|
||||||
|
schema: JOB_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
store: {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"owner": {
|
||||||
|
type: Authid,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
remote: {
|
||||||
|
schema: REMOTE_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
"remote-store": {
|
||||||
|
schema: DATASTORE_SCHEMA,
|
||||||
|
},
|
||||||
|
"remove-vanished": {
|
||||||
|
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
schedule: {
|
||||||
|
optional: true,
|
||||||
|
schema: SYNC_SCHEDULE_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Clone,Updater)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Sync Job
|
||||||
|
pub struct SyncJobConfig {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub id: String,
|
||||||
|
pub store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub owner: Option<Authid>,
|
||||||
|
pub remote: String,
|
||||||
|
pub remote_store: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub remove_vanished: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub schedule: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
config: {
|
||||||
|
type: SyncJobConfig,
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
type: JobScheduleStatus,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Status of Sync Job
|
||||||
|
pub struct SyncJobStatus {
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: SyncJobConfig,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub status: JobScheduleStatus,
|
||||||
|
}
|
56
pbs-api-types/src/key_derivation.rs
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
|
||||||
|
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
||||||
|
|
||||||
|
#[api(default: "scrypt")]
|
||||||
|
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Key derivation function for password protected encryption keys.
|
||||||
|
pub enum Kdf {
|
||||||
|
/// Do not encrypt the key.
|
||||||
|
None,
|
||||||
|
/// Encrypt they key with a password using SCrypt.
|
||||||
|
Scrypt,
|
||||||
|
/// Encrtypt the Key with a password using PBKDF2
|
||||||
|
PBKDF2,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Kdf {
|
||||||
|
#[inline]
|
||||||
|
fn default() -> Self {
|
||||||
|
Kdf::Scrypt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
kdf: {
|
||||||
|
type: Kdf,
|
||||||
|
},
|
||||||
|
fingerprint: {
|
||||||
|
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
/// Encryption Key Information
|
||||||
|
pub struct KeyInfo {
|
||||||
|
/// Path to key (if stored in a file)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub path: Option<String>,
|
||||||
|
pub kdf: Kdf,
|
||||||
|
/// Key creation time
|
||||||
|
pub created: i64,
|
||||||
|
/// Key modification time
|
||||||
|
pub modified: i64,
|
||||||
|
/// Key fingerprint
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub fingerprint: Option<String>,
|
||||||
|
/// Password hint
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub hint: Option<String>,
|
||||||
|
}
|
||||||
|
|
399
pbs-api-types/src/lib.rs
Normal file
@ -0,0 +1,399 @@
|
|||||||
|
//! Basic API types used by most of the PBS code.
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use anyhow::bail;
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{ApiStringFormat, ArraySchema, Schema, StringSchema};
|
||||||
|
use proxmox::const_regex;
|
||||||
|
use proxmox::{IPRE, IPRE_BRACKET, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; }
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! SNAPSHOT_PATH_REGEX_STR {
|
||||||
|
() => (
|
||||||
|
concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
mod acl;
|
||||||
|
pub use acl::*;
|
||||||
|
|
||||||
|
mod datastore;
|
||||||
|
pub use datastore::*;
|
||||||
|
|
||||||
|
mod jobs;
|
||||||
|
pub use jobs::*;
|
||||||
|
|
||||||
|
mod key_derivation;
|
||||||
|
pub use key_derivation::{Kdf, KeyInfo};
|
||||||
|
|
||||||
|
mod network;
|
||||||
|
pub use network::*;
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
mod userid;
|
||||||
|
pub use userid::Authid;
|
||||||
|
pub use userid::Userid;
|
||||||
|
pub use userid::{Realm, RealmRef};
|
||||||
|
pub use userid::{Tokenname, TokennameRef};
|
||||||
|
pub use userid::{Username, UsernameRef};
|
||||||
|
pub use userid::{PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA};
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
mod user;
|
||||||
|
pub use user::*;
|
||||||
|
|
||||||
|
pub mod upid;
|
||||||
|
pub use upid::*;
|
||||||
|
|
||||||
|
mod crypto;
|
||||||
|
pub use crypto::{CryptMode, Fingerprint};
|
||||||
|
|
||||||
|
pub mod file_restore;
|
||||||
|
|
||||||
|
mod remote;
|
||||||
|
pub use remote::*;
|
||||||
|
|
||||||
|
mod tape;
|
||||||
|
pub use tape::*;
|
||||||
|
|
||||||
|
mod zfs;
|
||||||
|
pub use zfs::*;
|
||||||
|
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
#[macro_use]
|
||||||
|
mod local_macros {
|
||||||
|
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
|
||||||
|
macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
|
||||||
|
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
|
||||||
|
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
|
||||||
|
macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
|
||||||
|
macro_rules! DNS_ALIAS_NAME {
|
||||||
|
() => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
|
||||||
|
pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
|
||||||
|
pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
|
||||||
|
pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
|
||||||
|
pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
|
||||||
|
pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
|
||||||
|
pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
|
||||||
|
pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
|
||||||
|
pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
|
||||||
|
pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
|
||||||
|
|
||||||
|
pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
|
||||||
|
|
||||||
|
pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters
|
||||||
|
|
||||||
|
pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
|
||||||
|
|
||||||
|
pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?
|
||||||
|
|
||||||
|
pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
|
||||||
|
|
||||||
|
/// Regex for safe identifiers.
|
||||||
|
///
|
||||||
|
/// This
|
||||||
|
/// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html)
|
||||||
|
/// contains further information why it is reasonable to restict
|
||||||
|
/// names this way. This is not only useful for filenames, but for
|
||||||
|
/// any identifier command line tools work with.
|
||||||
|
pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
|
||||||
|
|
||||||
|
pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
|
||||||
|
|
||||||
|
pub BACKUP_REPO_URL_REGEX = concat!(
|
||||||
|
r"^^(?:(?:(",
|
||||||
|
USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
|
||||||
|
")@)?(",
|
||||||
|
DNS_NAME!(), "|", IPRE_BRACKET!(),
|
||||||
|
"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$"
|
||||||
|
);
|
||||||
|
|
||||||
|
pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
|
||||||
|
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX);
|
||||||
|
pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX);
|
||||||
|
pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX);
|
||||||
|
pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX);
|
||||||
|
pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX);
|
||||||
|
pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
|
||||||
|
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
|
||||||
|
pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
|
||||||
|
pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
|
||||||
|
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
|
||||||
|
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
|
||||||
|
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
|
||||||
|
pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);
|
||||||
|
|
||||||
|
pub const DNS_ALIAS_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
|
||||||
|
|
||||||
|
pub const SEARCH_DOMAIN_SCHEMA: Schema =
|
||||||
|
StringSchema::new("Search domain for host-name lookup.").schema();
|
||||||
|
|
||||||
|
pub const FIRST_DNS_SERVER_SCHEMA: Schema =
|
||||||
|
StringSchema::new("First name server IP address.")
|
||||||
|
.format(&IP_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const SECOND_DNS_SERVER_SCHEMA: Schema =
|
||||||
|
StringSchema::new("Second name server IP address.")
|
||||||
|
.format(&IP_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const THIRD_DNS_SERVER_SCHEMA: Schema =
|
||||||
|
StringSchema::new("Third name server IP address.")
|
||||||
|
.format(&IP_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).")
|
||||||
|
.format(&HOSTNAME_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DNS_NAME_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&DNS_NAME_REGEX);
|
||||||
|
|
||||||
|
pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
|
||||||
|
|
||||||
|
pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
|
||||||
|
.format(&DNS_NAME_OR_IP_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
|
||||||
|
.format(&ApiStringFormat::VerifyFn(|node| {
|
||||||
|
if node == "localhost" || node == proxmox::tools::nodename() {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
bail!("no such node '{}'", node);
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
|
||||||
|
.format(&BLOCKDEVICE_NAME_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DISK_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||||
|
"Disk name list.", &BLOCKDEVICE_NAME_SCHEMA)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const DISK_LIST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A list of disk names, comma separated.")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
|
||||||
|
.format(&PASSWORD_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(1024)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
|
||||||
|
.format(&PASSWORD_FORMAT)
|
||||||
|
.min_length(5)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(32)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);
|
||||||
|
|
||||||
|
pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
|
||||||
|
StringSchema::new("X509 certificate fingerprint (sha256).")
|
||||||
|
.format(&FINGERPRINT_SHA256_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
|
||||||
|
|
||||||
|
pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
|
||||||
|
|
||||||
|
pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
|
||||||
|
.format(&SUBSCRIPTION_KEY_FORMAT)
|
||||||
|
.min_length(15)
|
||||||
|
.max_length(16)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.")
|
||||||
|
.max_length(256)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Prevent changes if current configuration file has different \
|
||||||
|
SHA256 digest. This can be used to prevent concurrent \
|
||||||
|
modifications.",
|
||||||
|
)
|
||||||
|
.format(&PVE_CONFIG_DIGEST_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
/// API schema format definition for repository URLs
|
||||||
|
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
|
||||||
|
|
||||||
|
|
||||||
|
// Complex type definitions
|
||||||
|
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Default, Serialize, Deserialize)]
|
||||||
|
/// Storage space usage information.
|
||||||
|
pub struct StorageStatus {
|
||||||
|
/// Total space (bytes).
|
||||||
|
pub total: u64,
|
||||||
|
/// Used space (bytes).
|
||||||
|
pub used: u64,
|
||||||
|
/// Available space (bytes).
|
||||||
|
pub avail: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
|
||||||
|
#[api]
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
/// RSA public key information
|
||||||
|
pub struct RsaPubKeyInfo {
|
||||||
|
/// Path to key (if stored in a file)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub path: Option<String>,
|
||||||
|
/// RSA exponent
|
||||||
|
pub exponent: String,
|
||||||
|
/// Hex-encoded RSA modulus
|
||||||
|
pub modulus: String,
|
||||||
|
/// Key (modulus) length in bits
|
||||||
|
pub length: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
|
||||||
|
let modulus = value.n().to_hex_str()?.to_string();
|
||||||
|
let exponent = value.e().to_dec_str()?.to_string();
|
||||||
|
let length = value.size() as usize * 8;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
path: None,
|
||||||
|
exponent,
|
||||||
|
modulus,
|
||||||
|
length,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "PascalCase")]
|
||||||
|
/// Describes a package for which an update is available.
|
||||||
|
pub struct APTUpdateInfo {
|
||||||
|
/// Package name
|
||||||
|
pub package: String,
|
||||||
|
/// Package title
|
||||||
|
pub title: String,
|
||||||
|
/// Package architecture
|
||||||
|
pub arch: String,
|
||||||
|
/// Human readable package description
|
||||||
|
pub description: String,
|
||||||
|
/// New version to be updated to
|
||||||
|
pub version: String,
|
||||||
|
/// Old version currently installed
|
||||||
|
pub old_version: String,
|
||||||
|
/// Package origin
|
||||||
|
pub origin: String,
|
||||||
|
/// Package priority in human-readable form
|
||||||
|
pub priority: String,
|
||||||
|
/// Package section
|
||||||
|
pub section: String,
|
||||||
|
/// URL under which the package's changelog can be retrieved
|
||||||
|
pub change_log_url: String,
|
||||||
|
/// Custom extra field for additional package information
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub extra_info: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "UPPERCASE")]
|
||||||
|
pub enum RRDMode {
|
||||||
|
/// Maximum
|
||||||
|
Max,
|
||||||
|
/// Average
|
||||||
|
Average,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[repr(u64)]
|
||||||
|
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
pub enum RRDTimeFrameResolution {
|
||||||
|
/// 1 min => last 70 minutes
|
||||||
|
Hour = 60,
|
||||||
|
/// 30 min => last 35 hours
|
||||||
|
Day = 60*30,
|
||||||
|
/// 3 hours => about 8 days
|
||||||
|
Week = 60*180,
|
||||||
|
/// 12 hours => last 35 days
|
||||||
|
Month = 60*720,
|
||||||
|
/// 1 week => last 490 days
|
||||||
|
Year = 60*10080,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Node Power command type.
|
||||||
|
pub enum NodePowerCommand {
|
||||||
|
/// Restart the server
|
||||||
|
Reboot,
|
||||||
|
/// Shutdown the server
|
||||||
|
Shutdown,
|
||||||
|
}
|
308
pbs-api-types/src/network.rs
Normal file
@ -0,0 +1,308 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
PROXMOX_SAFE_ID_REGEX,
|
||||||
|
IP_V4_FORMAT, IP_V6_FORMAT, IP_FORMAT,
|
||||||
|
CIDR_V4_FORMAT, CIDR_V6_FORMAT, CIDR_FORMAT,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
|
||||||
|
|
||||||
|
pub const IP_V4_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IPv4 address.")
|
||||||
|
.format(&IP_V4_FORMAT)
|
||||||
|
.max_length(15)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const IP_V6_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IPv6 address.")
|
||||||
|
.format(&IP_V6_FORMAT)
|
||||||
|
.max_length(39)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const IP_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IP (IPv4 or IPv6) address.")
|
||||||
|
.format(&IP_FORMAT)
|
||||||
|
.max_length(39)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const CIDR_V4_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IPv4 address with netmask (CIDR notation).")
|
||||||
|
.format(&CIDR_V4_FORMAT)
|
||||||
|
.max_length(18)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const CIDR_V6_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IPv6 address with netmask (CIDR notation).")
|
||||||
|
.format(&CIDR_V6_FORMAT)
|
||||||
|
.max_length(43)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const CIDR_SCHEMA: Schema =
|
||||||
|
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
|
||||||
|
.format(&CIDR_FORMAT)
|
||||||
|
.max_length(43)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Interface configuration method
|
||||||
|
pub enum NetworkConfigMethod {
|
||||||
|
/// Configuration is done manually using other tools
|
||||||
|
Manual,
|
||||||
|
/// Define interfaces with statically allocated addresses.
|
||||||
|
Static,
|
||||||
|
/// Obtain an address via DHCP
|
||||||
|
DHCP,
|
||||||
|
/// Define the loopback interface.
|
||||||
|
Loopback,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[repr(u8)]
|
||||||
|
/// Linux Bond Mode
|
||||||
|
pub enum LinuxBondMode {
|
||||||
|
/// Round-robin policy
|
||||||
|
balance_rr = 0,
|
||||||
|
/// Active-backup policy
|
||||||
|
active_backup = 1,
|
||||||
|
/// XOR policy
|
||||||
|
balance_xor = 2,
|
||||||
|
/// Broadcast policy
|
||||||
|
broadcast = 3,
|
||||||
|
/// IEEE 802.3ad Dynamic link aggregation
|
||||||
|
#[serde(rename = "802.3ad")]
|
||||||
|
ieee802_3ad = 4,
|
||||||
|
/// Adaptive transmit load balancing
|
||||||
|
balance_tlb = 5,
|
||||||
|
/// Adaptive load balancing
|
||||||
|
balance_alb = 6,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[repr(u8)]
|
||||||
|
/// Bond Transmit Hash Policy for LACP (802.3ad)
|
||||||
|
pub enum BondXmitHashPolicy {
|
||||||
|
/// Layer 2
|
||||||
|
layer2 = 0,
|
||||||
|
/// Layer 2+3
|
||||||
|
#[serde(rename = "layer2+3")]
|
||||||
|
layer2_3 = 1,
|
||||||
|
/// Layer 3+4
|
||||||
|
#[serde(rename = "layer3+4")]
|
||||||
|
layer3_4 = 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// Network interface type
|
||||||
|
pub enum NetworkInterfaceType {
|
||||||
|
/// Loopback
|
||||||
|
Loopback,
|
||||||
|
/// Physical Ethernet device
|
||||||
|
Eth,
|
||||||
|
/// Linux Bridge
|
||||||
|
Bridge,
|
||||||
|
/// Linux Bond
|
||||||
|
Bond,
|
||||||
|
/// Linux VLAN (eth.10)
|
||||||
|
Vlan,
|
||||||
|
/// Interface Alias (eth:1)
|
||||||
|
Alias,
|
||||||
|
/// Unknown interface type
|
||||||
|
Unknown,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
|
||||||
|
.format(&NETWORK_INTERFACE_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(libc::IFNAMSIZ-1)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||||
|
"Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A list of network devices, comma separated.")
|
||||||
|
.format(&ApiStringFormat::PropertyString(&NETWORK_INTERFACE_ARRAY_SCHEMA))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
"type": {
|
||||||
|
type: NetworkInterfaceType,
|
||||||
|
},
|
||||||
|
method: {
|
||||||
|
type: NetworkConfigMethod,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
method6: {
|
||||||
|
type: NetworkConfigMethod,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
cidr: {
|
||||||
|
schema: CIDR_V4_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
cidr6: {
|
||||||
|
schema: CIDR_V6_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
gateway: {
|
||||||
|
schema: IP_V4_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
gateway6: {
|
||||||
|
schema: IP_V6_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
options: {
|
||||||
|
description: "Option list (inet)",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
description: "Optional attribute line.",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
options6: {
|
||||||
|
description: "Option list (inet6)",
|
||||||
|
type: Array,
|
||||||
|
items: {
|
||||||
|
description: "Optional attribute line.",
|
||||||
|
type: String,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
comments: {
|
||||||
|
description: "Comments (inet, may span multiple lines)",
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
comments6: {
|
||||||
|
description: "Comments (inet6, may span multiple lines)",
|
||||||
|
type: String,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bridge_ports: {
|
||||||
|
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
slaves: {
|
||||||
|
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bond_mode: {
|
||||||
|
type: LinuxBondMode,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"bond-primary": {
|
||||||
|
schema: NETWORK_INTERFACE_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
bond_xmit_hash_policy: {
|
||||||
|
type: BondXmitHashPolicy,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
/// Network Interface configuration
|
||||||
|
pub struct Interface {
|
||||||
|
/// Autostart interface
|
||||||
|
#[serde(rename = "autostart")]
|
||||||
|
pub autostart: bool,
|
||||||
|
/// Interface is active (UP)
|
||||||
|
pub active: bool,
|
||||||
|
/// Interface name
|
||||||
|
pub name: String,
|
||||||
|
/// Interface type
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
pub interface_type: NetworkInterfaceType,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub method: Option<NetworkConfigMethod>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub method6: Option<NetworkConfigMethod>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv4 address with netmask
|
||||||
|
pub cidr: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv4 gateway
|
||||||
|
pub gateway: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv6 address with netmask
|
||||||
|
pub cidr6: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// IPv6 gateway
|
||||||
|
pub gateway6: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty")]
|
||||||
|
pub options: Vec<String>,
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty")]
|
||||||
|
pub options6: Vec<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comments: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comments6: Option<String>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
/// Maximum Transmission Unit
|
||||||
|
pub mtu: Option<u64>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bridge_ports: Option<Vec<String>>,
|
||||||
|
/// Enable bridge vlan support.
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bridge_vlan_aware: Option<bool>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub slaves: Option<Vec<String>>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub bond_mode: Option<LinuxBondMode>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
#[serde(rename = "bond-primary")]
|
||||||
|
pub bond_primary: Option<String>,
|
||||||
|
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Interface {
|
||||||
|
pub fn new(name: String) -> Self {
|
||||||
|
Self {
|
||||||
|
name,
|
||||||
|
interface_type: NetworkInterfaceType::Unknown,
|
||||||
|
autostart: false,
|
||||||
|
active: false,
|
||||||
|
method: None,
|
||||||
|
method6: None,
|
||||||
|
cidr: None,
|
||||||
|
gateway: None,
|
||||||
|
cidr6: None,
|
||||||
|
gateway6: None,
|
||||||
|
options: Vec::new(),
|
||||||
|
options6: Vec::new(),
|
||||||
|
comments: None,
|
||||||
|
comments6: None,
|
||||||
|
mtu: None,
|
||||||
|
bridge_ports: None,
|
||||||
|
bridge_vlan_aware: None,
|
||||||
|
slaves: None,
|
||||||
|
bond_mode: None,
|
||||||
|
bond_primary: None,
|
||||||
|
bond_xmit_hash_policy: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
86
pbs-api-types/src/remote.rs
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host.")
|
||||||
|
.format(&PASSWORD_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(1024)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema = StringSchema::new("Password or auth token for remote host (stored as base64 string).")
|
||||||
|
.format(&PASSWORD_FORMAT)
|
||||||
|
.min_length(1)
|
||||||
|
.max_length(1024)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
|
||||||
|
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||||
|
.min_length(3)
|
||||||
|
.max_length(32)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
host: {
|
||||||
|
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||||
|
},
|
||||||
|
port: {
|
||||||
|
optional: true,
|
||||||
|
description: "The (optional) port",
|
||||||
|
type: u16,
|
||||||
|
},
|
||||||
|
"auth-id": {
|
||||||
|
type: Authid,
|
||||||
|
},
|
||||||
|
fingerprint: {
|
||||||
|
optional: true,
|
||||||
|
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Remote configuration properties.
|
||||||
|
pub struct RemoteConfig {
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
pub host: String,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub port: Option<u16>,
|
||||||
|
pub auth_id: Authid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub fingerprint: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
name: {
|
||||||
|
schema: REMOTE_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
config: {
|
||||||
|
type: RemoteConfig,
|
||||||
|
},
|
||||||
|
password: {
|
||||||
|
schema: REMOTE_PASSWORD_BASE64_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Remote properties.
|
||||||
|
pub struct Remote {
|
||||||
|
pub name: String,
|
||||||
|
// Note: The stored password is base64 encoded
|
||||||
|
#[serde(skip_serializing_if="String::is_empty")]
|
||||||
|
#[serde(with = "proxmox::tools::serde::string_as_base64")]
|
||||||
|
pub password: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub config: RemoteConfig,
|
||||||
|
}
|
@ -10,10 +10,11 @@ use proxmox::api::{
|
|||||||
ArraySchema,
|
ArraySchema,
|
||||||
IntegerSchema,
|
IntegerSchema,
|
||||||
StringSchema,
|
StringSchema,
|
||||||
|
Updater,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
OptionalDeviceIdentification,
|
OptionalDeviceIdentification,
|
||||||
};
|
};
|
||||||
@ -62,10 +63,11 @@ Import/Export, i.e. any media in those slots are considered to be
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// SCSI tape changer
|
/// SCSI tape changer
|
||||||
pub struct ScsiTapeChanger {
|
pub struct ScsiTapeChanger {
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub path: String,
|
pub path: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
@ -6,10 +6,10 @@ use serde::{Deserialize, Serialize};
|
|||||||
|
|
||||||
use proxmox::api::{
|
use proxmox::api::{
|
||||||
api,
|
api,
|
||||||
schema::{Schema, IntegerSchema, StringSchema},
|
schema::{Schema, IntegerSchema, StringSchema, Updater},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
OptionalDeviceIdentification,
|
OptionalDeviceIdentification,
|
||||||
@ -21,14 +21,14 @@ pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
|
|||||||
.max_length(32)
|
.max_length(32)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const LINUX_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
|
pub const LTO_DRIVE_PATH_SCHEMA: Schema = StringSchema::new(
|
||||||
"The path to a LINUX non-rewinding SCSI tape device (i.e. '/dev/nst0')")
|
"The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')")
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
|
pub const CHANGER_DRIVENUM_SCHEMA: Schema = IntegerSchema::new(
|
||||||
"Associated changer drive number (requires option changer)")
|
"Associated changer drive number (requires option changer)")
|
||||||
.minimum(0)
|
.minimum(0)
|
||||||
.maximum(8)
|
.maximum(255)
|
||||||
.default(0)
|
.default(0)
|
||||||
.schema();
|
.schema();
|
||||||
|
|
||||||
@ -57,7 +57,7 @@ pub struct VirtualTapeDrive {
|
|||||||
schema: DRIVE_NAME_SCHEMA,
|
schema: DRIVE_NAME_SCHEMA,
|
||||||
},
|
},
|
||||||
path: {
|
path: {
|
||||||
schema: LINUX_DRIVE_PATH_SCHEMA,
|
schema: LTO_DRIVE_PATH_SCHEMA,
|
||||||
},
|
},
|
||||||
changer: {
|
changer: {
|
||||||
schema: CHANGER_NAME_SCHEMA,
|
schema: CHANGER_NAME_SCHEMA,
|
||||||
@ -69,10 +69,11 @@ pub struct VirtualTapeDrive {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Linux SCSI tape driver
|
/// Lto SCSI tape driver
|
||||||
pub struct LinuxTapeDrive {
|
pub struct LtoTapeDrive {
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
pub path: String,
|
pub path: String,
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
@ -84,7 +85,7 @@ pub struct LinuxTapeDrive {
|
|||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
config: {
|
config: {
|
||||||
type: LinuxTapeDrive,
|
type: LtoTapeDrive,
|
||||||
},
|
},
|
||||||
info: {
|
info: {
|
||||||
type: OptionalDeviceIdentification,
|
type: OptionalDeviceIdentification,
|
||||||
@ -96,7 +97,7 @@ pub struct LinuxTapeDrive {
|
|||||||
/// Drive list entry
|
/// Drive list entry
|
||||||
pub struct DriveListEntry {
|
pub struct DriveListEntry {
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub config: LinuxTapeDrive,
|
pub config: LtoTapeDrive,
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
pub info: OptionalDeviceIdentification,
|
pub info: OptionalDeviceIdentification,
|
||||||
/// the state of the drive if locked
|
/// the state of the drive if locked
|
||||||
@ -119,6 +120,8 @@ pub struct MamAttribute {
|
|||||||
#[api()]
|
#[api()]
|
||||||
#[derive(Serialize,Deserialize,Copy,Clone,Debug)]
|
#[derive(Serialize,Deserialize,Copy,Clone,Debug)]
|
||||||
pub enum TapeDensity {
|
pub enum TapeDensity {
|
||||||
|
/// Unknown (no media loaded)
|
||||||
|
Unknown,
|
||||||
/// LTO1
|
/// LTO1
|
||||||
LTO1,
|
LTO1,
|
||||||
/// LTO2
|
/// LTO2
|
||||||
@ -144,6 +147,7 @@ impl TryFrom<u8> for TapeDensity {
|
|||||||
|
|
||||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||||
let density = match value {
|
let density = match value {
|
||||||
|
0x00 => TapeDensity::Unknown,
|
||||||
0x40 => TapeDensity::LTO1,
|
0x40 => TapeDensity::LTO1,
|
||||||
0x42 => TapeDensity::LTO2,
|
0x42 => TapeDensity::LTO2,
|
||||||
0x44 => TapeDensity::LTO3,
|
0x44 => TapeDensity::LTO3,
|
||||||
@ -169,29 +173,37 @@ impl TryFrom<u8> for TapeDensity {
|
|||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize)]
|
||||||
#[serde(rename_all = "kebab-case")]
|
#[serde(rename_all = "kebab-case")]
|
||||||
/// Drive/Media status for Linux SCSI drives.
|
/// Drive/Media status for Lto SCSI drives.
|
||||||
///
|
///
|
||||||
/// Media related data is optional - only set if there is a medium
|
/// Media related data is optional - only set if there is a medium
|
||||||
/// loaded.
|
/// loaded.
|
||||||
pub struct LinuxDriveAndMediaStatus {
|
pub struct LtoDriveAndMediaStatus {
|
||||||
|
/// Vendor
|
||||||
|
pub vendor: String,
|
||||||
|
/// Product
|
||||||
|
pub product: String,
|
||||||
|
/// Revision
|
||||||
|
pub revision: String,
|
||||||
/// Block size (0 is variable size)
|
/// Block size (0 is variable size)
|
||||||
pub blocksize: u32,
|
pub blocksize: u32,
|
||||||
|
/// Compression enabled
|
||||||
|
pub compression: bool,
|
||||||
|
/// Drive buffer mode
|
||||||
|
pub buffer_mode: u8,
|
||||||
/// Tape density
|
/// Tape density
|
||||||
|
pub density: TapeDensity,
|
||||||
|
/// Media is write protected
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub density: Option<TapeDensity>,
|
pub write_protect: Option<bool>,
|
||||||
/// Status flags
|
|
||||||
pub status: String,
|
|
||||||
/// Linux Driver Options
|
|
||||||
pub options: String,
|
|
||||||
/// Tape Alert Flags
|
/// Tape Alert Flags
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub alert_flags: Option<String>,
|
pub alert_flags: Option<String>,
|
||||||
/// Current file number
|
/// Current file number
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub file_number: Option<u32>,
|
pub file_number: Option<u64>,
|
||||||
/// Current block number
|
/// Current block number
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub block_number: Option<u32>,
|
pub block_number: Option<u64>,
|
||||||
/// Medium Manufacture Date (epoch)
|
/// Medium Manufacture Date (epoch)
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub manufactured: Option<i64>,
|
pub manufactured: Option<i64>,
|
||||||
@ -212,3 +224,62 @@ pub struct LinuxDriveAndMediaStatus {
|
|||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
pub medium_wearout: Option<f64>,
|
pub medium_wearout: Option<f64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
/// Volume statistics from SCSI log page 17h
|
||||||
|
#[derive(Default, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
pub struct Lp17VolumeStatistics {
|
||||||
|
/// Volume mounts (thread count)
|
||||||
|
pub volume_mounts: u64,
|
||||||
|
/// Total data sets written
|
||||||
|
pub volume_datasets_written: u64,
|
||||||
|
/// Write retries
|
||||||
|
pub volume_recovered_write_data_errors: u64,
|
||||||
|
/// Total unrecovered write errors
|
||||||
|
pub volume_unrecovered_write_data_errors: u64,
|
||||||
|
/// Total suspended writes
|
||||||
|
pub volume_write_servo_errors: u64,
|
||||||
|
/// Total fatal suspended writes
|
||||||
|
pub volume_unrecovered_write_servo_errors: u64,
|
||||||
|
/// Total datasets read
|
||||||
|
pub volume_datasets_read: u64,
|
||||||
|
/// Total read retries
|
||||||
|
pub volume_recovered_read_errors: u64,
|
||||||
|
/// Total unrecovered read errors
|
||||||
|
pub volume_unrecovered_read_errors: u64,
|
||||||
|
/// Last mount unrecovered write errors
|
||||||
|
pub last_mount_unrecovered_write_errors: u64,
|
||||||
|
/// Last mount unrecovered read errors
|
||||||
|
pub last_mount_unrecovered_read_errors: u64,
|
||||||
|
/// Last mount bytes written
|
||||||
|
pub last_mount_bytes_written: u64,
|
||||||
|
/// Last mount bytes read
|
||||||
|
pub last_mount_bytes_read: u64,
|
||||||
|
/// Lifetime bytes written
|
||||||
|
pub lifetime_bytes_written: u64,
|
||||||
|
/// Lifetime bytes read
|
||||||
|
pub lifetime_bytes_read: u64,
|
||||||
|
/// Last load write compression ratio
|
||||||
|
pub last_load_write_compression_ratio: u64,
|
||||||
|
/// Last load read compression ratio
|
||||||
|
pub last_load_read_compression_ratio: u64,
|
||||||
|
/// Medium mount time
|
||||||
|
pub medium_mount_time: u64,
|
||||||
|
/// Medium ready time
|
||||||
|
pub medium_ready_time: u64,
|
||||||
|
/// Total native capacity
|
||||||
|
pub total_native_capacity: u64,
|
||||||
|
/// Total used native capacity
|
||||||
|
pub total_used_native_capacity: u64,
|
||||||
|
/// Write protect
|
||||||
|
pub write_protect: bool,
|
||||||
|
/// Volume is WORM
|
||||||
|
pub worm: bool,
|
||||||
|
/// Beginning of medium passes
|
||||||
|
pub beginning_of_medium_passes: u64,
|
||||||
|
/// Middle of medium passes
|
||||||
|
pub middle_of_tape_passes: u64,
|
||||||
|
/// Volume serial number
|
||||||
|
pub serial: String,
|
||||||
|
}
|
@ -1,17 +1,46 @@
|
|||||||
use ::serde::{Deserialize, Serialize};
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::{
|
use proxmox::{
|
||||||
api::api,
|
api::{api, schema::*},
|
||||||
tools::Uuid,
|
tools::Uuid,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
MEDIA_UUID_SCHEMA,
|
UUID_FORMAT,
|
||||||
MEDIA_SET_UUID_SCHEMA,
|
|
||||||
MediaStatus,
|
MediaStatus,
|
||||||
MediaLocation,
|
MediaLocation,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub const MEDIA_SET_UUID_SCHEMA: Schema =
|
||||||
|
StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
|
||||||
|
.format(&UUID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const MEDIA_UUID_SCHEMA: Schema =
|
||||||
|
StringSchema::new("Media Uuid.")
|
||||||
|
.format(&UUID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
"media-set-uuid": {
|
||||||
|
schema: MEDIA_SET_UUID_SCHEMA,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
/// Media Set list entry
|
||||||
|
pub struct MediaSetListEntry {
|
||||||
|
/// Media set name
|
||||||
|
pub media_set_name: String,
|
||||||
|
pub media_set_uuid: Uuid,
|
||||||
|
/// MediaSet creation time stamp
|
||||||
|
pub media_set_ctime: i64,
|
||||||
|
/// Media Pool
|
||||||
|
pub pool: String,
|
||||||
|
}
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
properties: {
|
properties: {
|
||||||
location: {
|
location: {
|
@ -9,7 +9,7 @@ use proxmox::api::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::api2::types::{
|
use crate::{
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CHANGER_NAME_SCHEMA,
|
CHANGER_NAME_SCHEMA,
|
||||||
};
|
};
|
||||||
@ -35,8 +35,8 @@ pub enum MediaLocation {
|
|||||||
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
proxmox::forward_deserialize_to_from_str!(MediaLocation);
|
||||||
proxmox::forward_serialize_to_display!(MediaLocation);
|
proxmox::forward_serialize_to_display!(MediaLocation);
|
||||||
|
|
||||||
impl MediaLocation {
|
impl proxmox::api::schema::ApiType for MediaLocation {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new(
|
const API_SCHEMA: Schema = StringSchema::new(
|
||||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')")
|
||||||
.format(&ApiStringFormat::VerifyFn(|text| {
|
.format(&ApiStringFormat::VerifyFn(|text| {
|
||||||
let location: MediaLocation = text.parse()?;
|
let location: MediaLocation = text.parse()?;
|
@ -4,28 +4,23 @@
|
|||||||
//! so we cannot use them directly for the API. Instead, we represent
|
//! so we cannot use them directly for the API. Instead, we represent
|
||||||
//! them as String.
|
//! them as String.
|
||||||
|
|
||||||
use anyhow::Error;
|
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::{
|
use proxmox::api::{
|
||||||
api,
|
api,
|
||||||
schema::{Schema, StringSchema, ApiStringFormat},
|
schema::{Schema, StringSchema, ApiStringFormat, Updater},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use proxmox_systemd::time::{parse_calendar_event, parse_time_span, CalendarEvent, TimeSpan};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
tools::systemd::time::{
|
PROXMOX_SAFE_ID_FORMAT,
|
||||||
CalendarEvent,
|
SINGLE_LINE_COMMENT_FORMAT,
|
||||||
TimeSpan,
|
SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
parse_time_span,
|
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||||
parse_calendar_event,
|
|
||||||
},
|
|
||||||
api2::types::{
|
|
||||||
PROXMOX_SAFE_ID_FORMAT,
|
|
||||||
SINGLE_LINE_COMMENT_FORMAT,
|
|
||||||
SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
|
||||||
},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
|
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
|
||||||
@ -138,10 +133,11 @@ impl std::str::FromStr for RetentionPolicy {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
#[derive(Serialize,Deserialize)]
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
/// Media pool configuration
|
/// Media pool configuration
|
||||||
pub struct MediaPoolConfig {
|
pub struct MediaPoolConfig {
|
||||||
/// The pool name
|
/// The pool name
|
||||||
|
#[updater(skip)]
|
||||||
pub name: String,
|
pub name: String,
|
||||||
/// Media Set allocation policy
|
/// Media Set allocation policy
|
||||||
#[serde(skip_serializing_if="Option::is_none")]
|
#[serde(skip_serializing_if="Option::is_none")]
|
94
pbs-api-types/src/tape/mod.rs
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
//! Types for tape backup API
|
||||||
|
|
||||||
|
mod device;
|
||||||
|
pub use device::*;
|
||||||
|
|
||||||
|
mod changer;
|
||||||
|
pub use changer::*;
|
||||||
|
|
||||||
|
mod drive;
|
||||||
|
pub use drive::*;
|
||||||
|
|
||||||
|
mod media_pool;
|
||||||
|
pub use media_pool::*;
|
||||||
|
|
||||||
|
mod media_status;
|
||||||
|
pub use media_status::*;
|
||||||
|
|
||||||
|
mod media_location;
|
||||||
|
|
||||||
|
pub use media_location::*;
|
||||||
|
|
||||||
|
mod media;
|
||||||
|
pub use media::*;
|
||||||
|
|
||||||
|
use ::serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{Schema, StringSchema, ApiStringFormat};
|
||||||
|
use proxmox::tools::Uuid;
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
FINGERPRINT_SHA256_FORMAT, BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA,
|
||||||
|
};
|
||||||
|
|
||||||
|
const_regex!{
|
||||||
|
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
|
||||||
|
|
||||||
|
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"Tape encryption key fingerprint (sha256)."
|
||||||
|
)
|
||||||
|
.format(&FINGERPRINT_SHA256_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
|
||||||
|
"A snapshot in the format: 'store:type/id/time")
|
||||||
|
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
|
||||||
|
.type_text("store:type/id/time")
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
pool: {
|
||||||
|
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"label-text": {
|
||||||
|
schema: MEDIA_LABEL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"media": {
|
||||||
|
schema: MEDIA_UUID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"media-set": {
|
||||||
|
schema: MEDIA_SET_UUID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"backup-type": {
|
||||||
|
schema: BACKUP_TYPE_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"backup-id": {
|
||||||
|
schema: BACKUP_ID_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// Content list filter parameters
|
||||||
|
pub struct MediaContentListFilter {
|
||||||
|
pub pool: Option<String>,
|
||||||
|
pub label_text: Option<String>,
|
||||||
|
pub media: Option<Uuid>,
|
||||||
|
pub media_set: Option<Uuid>,
|
||||||
|
pub backup_type: Option<String>,
|
||||||
|
pub backup_id: Option<String>,
|
||||||
|
}
|
203
pbs-api-types/src/upid.rs
Normal file
@ -0,0 +1,203 @@
|
|||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
|
use anyhow::{bail, Error};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, ArraySchema, ReturnType};
|
||||||
|
use proxmox::const_regex;
|
||||||
|
use proxmox::sys::linux::procfs;
|
||||||
|
|
||||||
|
use crate::Authid;
|
||||||
|
|
||||||
|
/// Unique Process/Task Identifier
|
||||||
|
///
|
||||||
|
/// We use this to uniquely identify worker task. UPIDs have a short
|
||||||
|
/// string repesentaion, which gives additional information about the
|
||||||
|
/// type of the task. for example:
|
||||||
|
/// ```text
|
||||||
|
/// UPID:{node}:{pid}:{pstart}:{task_id}:{starttime}:{worker_type}:{worker_id}:{userid}:
|
||||||
|
/// UPID:elsa:00004F37:0039E469:00000000:5CA78B83:garbage_collection::root@pam:
|
||||||
|
/// ```
|
||||||
|
/// Please note that we use tokio, so a single thread can run multiple
|
||||||
|
/// tasks.
|
||||||
|
// #[api] - manually implemented API type
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct UPID {
|
||||||
|
/// The Unix PID
|
||||||
|
pub pid: libc::pid_t,
|
||||||
|
/// The Unix process start time from `/proc/pid/stat`
|
||||||
|
pub pstart: u64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub starttime: i64,
|
||||||
|
/// The task ID (inside the process/thread)
|
||||||
|
pub task_id: usize,
|
||||||
|
/// Worker type (arbitrary ASCII string)
|
||||||
|
pub worker_type: String,
|
||||||
|
/// Worker ID (arbitrary ASCII string)
|
||||||
|
pub worker_id: Option<String>,
|
||||||
|
/// The authenticated entity who started the task
|
||||||
|
pub auth_id: Authid,
|
||||||
|
/// The node name.
|
||||||
|
pub node: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
proxmox::forward_serialize_to_display!(UPID);
|
||||||
|
proxmox::forward_deserialize_to_from_str!(UPID);
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub PROXMOX_UPID_REGEX = concat!(
|
||||||
|
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
|
||||||
|
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
|
||||||
|
r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const PROXMOX_UPID_FORMAT: ApiStringFormat =
|
||||||
|
ApiStringFormat::Pattern(&PROXMOX_UPID_REGEX);
|
||||||
|
|
||||||
|
pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task Identifier")
|
||||||
|
.min_length("UPID:N:12345678:12345678:12345678:::".len())
|
||||||
|
.max_length(128) // arbitrary
|
||||||
|
.format(&PROXMOX_UPID_FORMAT)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
impl ApiType for UPID {
|
||||||
|
const API_SCHEMA: Schema = UPID_SCHEMA;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UPID {
|
||||||
|
/// Create a new UPID
|
||||||
|
pub fn new(
|
||||||
|
worker_type: &str,
|
||||||
|
worker_id: Option<String>,
|
||||||
|
auth_id: Authid,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
|
|
||||||
|
let pid = unsafe { libc::getpid() };
|
||||||
|
|
||||||
|
let bad: &[_] = &['/', ':', ' '];
|
||||||
|
|
||||||
|
if worker_type.contains(bad) {
|
||||||
|
bail!("illegal characters in worker type '{}'", worker_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
static WORKER_TASK_NEXT_ID: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
|
let task_id = WORKER_TASK_NEXT_ID.fetch_add(1, Ordering::SeqCst);
|
||||||
|
|
||||||
|
Ok(UPID {
|
||||||
|
pid,
|
||||||
|
pstart: procfs::PidStat::read_from_pid(nix::unistd::Pid::from_raw(pid))?.starttime,
|
||||||
|
starttime: proxmox::tools::time::epoch_i64(),
|
||||||
|
task_id,
|
||||||
|
worker_type: worker_type.to_owned(),
|
||||||
|
worker_id,
|
||||||
|
auth_id,
|
||||||
|
node: proxmox::tools::nodename().to_owned(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
impl std::str::FromStr for UPID {
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
if let Some(cap) = PROXMOX_UPID_REGEX.captures(s) {
|
||||||
|
|
||||||
|
let worker_id = if cap["wid"].is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let wid = proxmox_systemd::unescape_unit(&cap["wid"])?;
|
||||||
|
Some(wid)
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(UPID {
|
||||||
|
pid: i32::from_str_radix(&cap["pid"], 16).unwrap(),
|
||||||
|
pstart: u64::from_str_radix(&cap["pstart"], 16).unwrap(),
|
||||||
|
starttime: i64::from_str_radix(&cap["starttime"], 16).unwrap(),
|
||||||
|
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
|
||||||
|
worker_type: cap["wtype"].to_string(),
|
||||||
|
worker_id,
|
||||||
|
auth_id: cap["authid"].parse()?,
|
||||||
|
node: cap["node"].to_string(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
bail!("unable to parse UPID '{}'", s);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for UPID {
|
||||||
|
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||||
|
|
||||||
|
let wid = if let Some(ref id) = self.worker_id {
|
||||||
|
proxmox_systemd::escape_unit(id, false)
|
||||||
|
} else {
|
||||||
|
String::new()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Note: pstart can be > 32bit if uptime > 497 days, so this can result in
|
||||||
|
// more that 8 characters for pstart
|
||||||
|
|
||||||
|
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
|
||||||
|
self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
pub enum TaskStateType {
|
||||||
|
/// Ok
|
||||||
|
OK,
|
||||||
|
/// Warning
|
||||||
|
Warning,
|
||||||
|
/// Error
|
||||||
|
Error,
|
||||||
|
/// Unknown
|
||||||
|
Unknown,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
upid: { schema: UPID::API_SCHEMA },
|
||||||
|
},
|
||||||
|
)]
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
/// Task properties.
|
||||||
|
pub struct TaskListItem {
|
||||||
|
pub upid: String,
|
||||||
|
/// The node name where the task is running on.
|
||||||
|
pub node: String,
|
||||||
|
/// The Unix PID
|
||||||
|
pub pid: i64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub pstart: u64,
|
||||||
|
/// The task start time (Epoch)
|
||||||
|
pub starttime: i64,
|
||||||
|
/// Worker type (arbitrary ASCII string)
|
||||||
|
pub worker_type: String,
|
||||||
|
/// Worker ID (arbitrary ASCII string)
|
||||||
|
pub worker_id: Option<String>,
|
||||||
|
/// The authenticated entity who started the task
|
||||||
|
pub user: Authid,
|
||||||
|
/// The task end time (Epoch)
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub endtime: Option<i64>,
|
||||||
|
/// Task end status
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub status: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
|
||||||
|
optional: false,
|
||||||
|
schema: &ArraySchema::new(
|
||||||
|
"A list of tasks.",
|
||||||
|
&TaskListItem::API_SCHEMA,
|
||||||
|
).schema(),
|
||||||
|
};
|
||||||
|
|
208
pbs-api-types/src/user.rs
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::api;
|
||||||
|
use proxmox::api::schema::{
|
||||||
|
BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
||||||
|
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
|
||||||
|
|
||||||
|
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
|
||||||
|
"Enable the account (default). You can set this to '0' to disable the account.")
|
||||||
|
.default(true)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Account expiration date (seconds since epoch). '0' means no expiration date.")
|
||||||
|
.default(0)
|
||||||
|
.minimum(0)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
|
||||||
|
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||||
|
.min_length(2)
|
||||||
|
.max_length(64)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
firstname: {
|
||||||
|
optional: true,
|
||||||
|
schema: FIRST_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
lastname: {
|
||||||
|
schema: LAST_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
email: {
|
||||||
|
schema: EMAIL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
tokens: {
|
||||||
|
type: Array,
|
||||||
|
optional: true,
|
||||||
|
description: "List of user's API tokens.",
|
||||||
|
items: {
|
||||||
|
type: ApiToken
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
/// User properties with added list of ApiTokens
|
||||||
|
pub struct UserWithTokens {
|
||||||
|
pub userid: Userid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub firstname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub lastname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub email: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Vec::is_empty", default)]
|
||||||
|
pub tokens: Vec<ApiToken>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
tokenid: {
|
||||||
|
schema: PROXMOX_TOKEN_ID_SCHEMA,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize)]
|
||||||
|
/// ApiToken properties.
|
||||||
|
pub struct ApiToken {
|
||||||
|
pub tokenid: Authid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiToken {
|
||||||
|
pub fn is_active(&self) -> bool {
|
||||||
|
if !self.enable.unwrap_or(true) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if let Some(expire) = self.expire {
|
||||||
|
let now = proxmox::tools::time::epoch_i64();
|
||||||
|
if expire > 0 && expire <= now {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api(
|
||||||
|
properties: {
|
||||||
|
userid: {
|
||||||
|
type: Userid,
|
||||||
|
},
|
||||||
|
comment: {
|
||||||
|
optional: true,
|
||||||
|
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
enable: {
|
||||||
|
optional: true,
|
||||||
|
schema: ENABLE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
expire: {
|
||||||
|
optional: true,
|
||||||
|
schema: EXPIRE_USER_SCHEMA,
|
||||||
|
},
|
||||||
|
firstname: {
|
||||||
|
optional: true,
|
||||||
|
schema: FIRST_NAME_SCHEMA,
|
||||||
|
},
|
||||||
|
lastname: {
|
||||||
|
schema: LAST_NAME_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
email: {
|
||||||
|
schema: EMAIL_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)]
|
||||||
|
#[derive(Serialize,Deserialize,Updater)]
|
||||||
|
/// User properties.
|
||||||
|
pub struct User {
|
||||||
|
#[updater(skip)]
|
||||||
|
pub userid: Userid,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub comment: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub enable: Option<bool>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub expire: Option<i64>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub firstname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub lastname: Option<String>,
|
||||||
|
#[serde(skip_serializing_if="Option::is_none")]
|
||||||
|
pub email: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl User {
|
||||||
|
pub fn is_active(&self) -> bool {
|
||||||
|
if !self.enable.unwrap_or(true) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if let Some(expire) = self.expire {
|
||||||
|
let now = proxmox::tools::time::epoch_i64();
|
||||||
|
if expire > 0 && expire <= now {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
@ -30,7 +30,7 @@ use lazy_static::lazy_static;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use proxmox::api::api;
|
use proxmox::api::api;
|
||||||
use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
|
use proxmox::api::schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
|
||||||
use proxmox::const_regex;
|
use proxmox::const_regex;
|
||||||
|
|
||||||
// we only allow a limited set of characters
|
// we only allow a limited set of characters
|
||||||
@ -38,10 +38,15 @@ use proxmox::const_regex;
|
|||||||
// colon separated lists)!
|
// colon separated lists)!
|
||||||
// slash is not allowed because it is used as pve API delimiter
|
// slash is not allowed because it is used as pve API delimiter
|
||||||
// also see "man useradd"
|
// also see "man useradd"
|
||||||
|
#[macro_export]
|
||||||
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
|
macro_rules! USER_NAME_REGEX_STR { () => (r"(?:[^\s:/[:cntrl:]]+)") }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
|
macro_rules! GROUP_NAME_REGEX_STR { () => (USER_NAME_REGEX_STR!()) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
|
macro_rules! TOKEN_NAME_REGEX_STR { () => (PROXMOX_SAFE_ID_REGEX_STR!()) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
|
macro_rules! USER_ID_REGEX_STR { () => (concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())) }
|
||||||
|
#[macro_export]
|
||||||
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }
|
macro_rules! APITOKEN_ID_REGEX_STR { () => (concat!(USER_ID_REGEX_STR!() , r"!", TOKEN_NAME_REGEX_STR!())) }
|
||||||
|
|
||||||
const_regex! {
|
const_regex! {
|
||||||
@ -93,7 +98,6 @@ pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
|
|||||||
.max_length(32);
|
.max_length(32);
|
||||||
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
|
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
|
||||||
|
|
||||||
|
|
||||||
#[api(
|
#[api(
|
||||||
type: String,
|
type: String,
|
||||||
format: &PROXMOX_USER_NAME_FORMAT,
|
format: &PROXMOX_USER_NAME_FORMAT,
|
||||||
@ -393,19 +397,21 @@ impl<'a> TryFrom<&'a str> for &'a TokennameRef {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A complete user id consisting of a user name and a realm
|
/// A complete user id consisting of a user name and a realm
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, UpdaterType)]
|
||||||
pub struct Userid {
|
pub struct Userid {
|
||||||
data: String,
|
data: String,
|
||||||
name_len: usize,
|
name_len: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Userid {
|
impl ApiType for Userid {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new("User ID")
|
const API_SCHEMA: Schema = StringSchema::new("User ID")
|
||||||
.format(&PROXMOX_USER_ID_FORMAT)
|
.format(&PROXMOX_USER_ID_FORMAT)
|
||||||
.min_length(3)
|
.min_length(3)
|
||||||
.max_length(64)
|
.max_length(64)
|
||||||
.schema();
|
.schema();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Userid {
|
||||||
const fn new(data: String, name_len: usize) -> Self {
|
const fn new(data: String, name_len: usize) -> Self {
|
||||||
Self { data, name_len }
|
Self { data, name_len }
|
||||||
}
|
}
|
||||||
@ -522,19 +528,21 @@ impl PartialEq<String> for Userid {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A complete authentication id consisting of a user id and an optional token name.
|
/// A complete authentication id consisting of a user id and an optional token name.
|
||||||
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
|
#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType)]
|
||||||
pub struct Authid {
|
pub struct Authid {
|
||||||
user: Userid,
|
user: Userid,
|
||||||
tokenname: Option<Tokenname>
|
tokenname: Option<Tokenname>
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Authid {
|
impl ApiType for Authid {
|
||||||
pub const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
|
const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
|
||||||
.format(&PROXMOX_AUTH_ID_FORMAT)
|
.format(&PROXMOX_AUTH_ID_FORMAT)
|
||||||
.min_length(3)
|
.min_length(3)
|
||||||
.max_length(64)
|
.max_length(64)
|
||||||
.schema();
|
.schema();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Authid {
|
||||||
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
|
const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
|
||||||
Self { user, tokenname }
|
Self { user, tokenname }
|
||||||
}
|
}
|
81
pbs-api-types/src/zfs.rs
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use proxmox::api::{api, schema::*};
|
||||||
|
|
||||||
|
use proxmox::const_regex;
|
||||||
|
|
||||||
|
const_regex! {
|
||||||
|
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new(
|
||||||
|
"Pool sector size exponent.")
|
||||||
|
.minimum(9)
|
||||||
|
.maximum(16)
|
||||||
|
.default(12)
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
|
||||||
|
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
|
||||||
|
.schema();
|
||||||
|
|
||||||
|
#[api(default: "On")]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// The ZFS compression algorithm to use.
|
||||||
|
pub enum ZfsCompressionType {
|
||||||
|
/// Gnu Zip
|
||||||
|
Gzip,
|
||||||
|
/// LZ4
|
||||||
|
Lz4,
|
||||||
|
/// LZJB
|
||||||
|
Lzjb,
|
||||||
|
/// ZLE
|
||||||
|
Zle,
|
||||||
|
/// ZStd
|
||||||
|
ZStd,
|
||||||
|
/// Enable compression using the default algorithm.
|
||||||
|
On,
|
||||||
|
/// Disable compression.
|
||||||
|
Off,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
/// The ZFS RAID level to use.
|
||||||
|
pub enum ZfsRaidLevel {
|
||||||
|
/// Single Disk
|
||||||
|
Single,
|
||||||
|
/// Mirror
|
||||||
|
Mirror,
|
||||||
|
/// Raid10
|
||||||
|
Raid10,
|
||||||
|
/// RaidZ
|
||||||
|
RaidZ,
|
||||||
|
/// RaidZ2
|
||||||
|
RaidZ2,
|
||||||
|
/// RaidZ3
|
||||||
|
RaidZ3,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[api()]
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all="kebab-case")]
|
||||||
|
/// zpool list item
|
||||||
|
pub struct ZpoolListItem {
|
||||||
|
/// zpool name
|
||||||
|
pub name: String,
|
||||||
|
/// Health
|
||||||
|
pub health: String,
|
||||||
|
/// Total size
|
||||||
|
pub size: u64,
|
||||||
|
/// Used size
|
||||||
|
pub alloc: u64,
|
||||||
|
/// Free space
|
||||||
|
pub free: u64,
|
||||||
|
/// ZFS fragnentation level
|
||||||
|
pub frag: u64,
|
||||||
|
/// ZFS deduplication ratio
|
||||||
|
pub dedup: f64,
|
||||||
|
}
|
9
pbs-buildcfg/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-buildcfg"
|
||||||
|
version = "2.0.10"
|
||||||
|
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "macros used for pbs related paths such as configdir and rundir"
|
||||||
|
build = "build.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
24
pbs-buildcfg/build.rs
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// build.rs
|
||||||
|
use std::env;
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
let repoid = match env::var("REPOID") {
|
||||||
|
Ok(repoid) => repoid,
|
||||||
|
Err(_) => {
|
||||||
|
match Command::new("git")
|
||||||
|
.args(&["rev-parse", "HEAD"])
|
||||||
|
.output()
|
||||||
|
{
|
||||||
|
Ok(output) => {
|
||||||
|
String::from_utf8(output.stdout).unwrap()
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
panic!("git rev-parse failed: {}", err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
println!("cargo:rustc-env=REPOID={}", repoid);
|
||||||
|
}
|
@ -1,15 +1,38 @@
|
|||||||
//! Exports configuration data from the build system
|
//! Exports configuration data from the build system
|
||||||
|
|
||||||
|
pub const PROXMOX_PKG_VERSION: &str =
|
||||||
|
concat!(
|
||||||
|
env!("CARGO_PKG_VERSION_MAJOR"),
|
||||||
|
".",
|
||||||
|
env!("CARGO_PKG_VERSION_MINOR"),
|
||||||
|
);
|
||||||
|
pub const PROXMOX_PKG_RELEASE: &str = env!("CARGO_PKG_VERSION_PATCH");
|
||||||
|
pub const PROXMOX_PKG_REPOID: &str = env!("REPOID");
|
||||||
|
|
||||||
|
|
||||||
/// The configured configuration directory
|
/// The configured configuration directory
|
||||||
pub const CONFIGDIR: &str = "/etc/proxmox-backup";
|
pub const CONFIGDIR: &str = "/etc/proxmox-backup";
|
||||||
pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";
|
pub const JS_DIR: &str = "/usr/share/javascript/proxmox-backup";
|
||||||
|
|
||||||
|
/// Unix system user used by proxmox-backup-proxy
|
||||||
|
pub const BACKUP_USER_NAME: &str = "backup";
|
||||||
|
/// Unix system group used by proxmox-backup-proxy
|
||||||
|
pub const BACKUP_GROUP_NAME: &str = "backup";
|
||||||
|
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
|
||||||
|
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
|
||||||
|
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }
|
||||||
|
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
|
||||||
|
() => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
|
||||||
|
}
|
||||||
|
|
||||||
/// namespaced directory for in-memory (tmpfs) run state
|
/// namespaced directory for in-memory (tmpfs) run state
|
||||||
pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
|
pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
|
||||||
|
|
||||||
@ -30,12 +53,25 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(
|
|||||||
/// the PID filename for the privileged api daemon
|
/// the PID filename for the privileged api daemon
|
||||||
pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");
|
pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");
|
||||||
|
|
||||||
|
/// filename of the cached initramfs to use for booting single file restore VMs, this file is
|
||||||
|
/// automatically created by APT hooks
|
||||||
|
pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
|
||||||
|
concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");
|
||||||
|
|
||||||
|
/// filename of the cached initramfs to use for debugging single file restore
|
||||||
|
pub const PROXMOX_BACKUP_INITRAMFS_DBG_FN: &str =
|
||||||
|
concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs-debug.img");
|
||||||
|
|
||||||
|
/// filename of the kernel to use for booting single file restore VMs
|
||||||
|
pub const PROXMOX_BACKUP_KERNEL_FN: &str =
|
||||||
|
concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
|
||||||
|
|
||||||
/// Prepend configuration directory to a file name
|
/// Prepend configuration directory to a file name
|
||||||
///
|
///
|
||||||
/// This is a simply way to get the full path for configuration files.
|
/// This is a simply way to get the full path for configuration files.
|
||||||
/// #### Example:
|
/// #### Example:
|
||||||
/// ```
|
/// ```
|
||||||
/// # #[macro_use] extern crate proxmox_backup;
|
/// use pbs_buildcfg::configdir;
|
||||||
/// let cert_path = configdir!("/proxy.pfx");
|
/// let cert_path = configdir!("/proxy.pfx");
|
||||||
/// ```
|
/// ```
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
@ -49,6 +85,6 @@ macro_rules! configdir {
|
|||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! rundir {
|
macro_rules! rundir {
|
||||||
($subdir:expr) => {
|
($subdir:expr) => {
|
||||||
concat!(PROXMOX_BACKUP_RUN_DIR_M!(), $subdir)
|
concat!($crate::PROXMOX_BACKUP_RUN_DIR_M!(), $subdir)
|
||||||
};
|
};
|
||||||
}
|
}
|
40
pbs-client/Cargo.toml
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
[package]
|
||||||
|
name = "pbs-client"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["Wolfgang Bumiller <w.bumiller@proxmox.com>"]
|
||||||
|
edition = "2018"
|
||||||
|
description = "The main proxmox backup client crate"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
bitflags = "1.2.1"
|
||||||
|
bytes = "1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
h2 = { version = "0.3", features = [ "stream" ] }
|
||||||
|
http = "0.2"
|
||||||
|
hyper = { version = "0.14", features = [ "full" ] }
|
||||||
|
lazy_static = "1.4"
|
||||||
|
libc = "0.2"
|
||||||
|
nix = "0.19.1"
|
||||||
|
openssl = "0.10"
|
||||||
|
percent-encoding = "2.1"
|
||||||
|
pin-project-lite = "0.2"
|
||||||
|
regex = "1.2"
|
||||||
|
rustyline = "7"
|
||||||
|
serde_json = "1.0"
|
||||||
|
tokio = { version = "1.6", features = [ "fs", "signal" ] }
|
||||||
|
tokio-stream = "0.1.0"
|
||||||
|
tower-service = "0.3.0"
|
||||||
|
xdg = "2.2"
|
||||||
|
|
||||||
|
pathpatterns = "0.1.2"
|
||||||
|
proxmox = { version = "0.13.3", default-features = false, features = [ "cli" ] }
|
||||||
|
proxmox-fuse = "0.1.1"
|
||||||
|
proxmox-http = { version = "0.4.0", features = [ "client", "http-helpers", "websocket" ] }
|
||||||
|
pxar = { version = "0.10.1", features = [ "tokio-io" ] }
|
||||||
|
|
||||||
|
pbs-api-types = { path = "../pbs-api-types" }
|
||||||
|
pbs-buildcfg = { path = "../pbs-buildcfg" }
|
||||||
|
pbs-datastore = { path = "../pbs-datastore" }
|
||||||
|
pbs-runtime = { path = "../pbs-runtime" }
|
||||||
|
pbs-tools = { path = "../pbs-tools" }
|
@ -9,10 +9,15 @@ use serde_json::{json, Value};
|
|||||||
|
|
||||||
use proxmox::tools::digest_to_hex;
|
use proxmox::tools::digest_to_hex;
|
||||||
|
|
||||||
use crate::{
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
tools::compute_file_csum,
|
use pbs_tools::sha::sha256;
|
||||||
backup::*,
|
use pbs_datastore::{PROXMOX_BACKUP_READER_PROTOCOL_ID_V1, BackupManifest};
|
||||||
};
|
use pbs_datastore::data_blob::DataBlob;
|
||||||
|
use pbs_datastore::data_blob_reader::DataBlobReader;
|
||||||
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
|
use pbs_datastore::fixed_index::FixedIndexReader;
|
||||||
|
use pbs_datastore::index::IndexFile;
|
||||||
|
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
|
||||||
|
|
||||||
use super::{HttpClient, H2Client};
|
use super::{HttpClient, H2Client};
|
||||||
|
|
||||||
@ -148,7 +153,7 @@ impl BackupReader {
|
|||||||
&self,
|
&self,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
name: &str,
|
name: &str,
|
||||||
) -> Result<DataBlobReader<File>, Error> {
|
) -> Result<DataBlobReader<'_, File>, Error> {
|
||||||
|
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = std::fs::OpenOptions::new()
|
||||||
.write(true)
|
.write(true)
|
||||||
@ -158,7 +163,8 @@ impl BackupReader {
|
|||||||
|
|
||||||
self.download(name, &mut tmpfile).await?;
|
self.download(name, &mut tmpfile).await?;
|
||||||
|
|
||||||
let (csum, size) = compute_file_csum(&mut tmpfile)?;
|
tmpfile.seek(SeekFrom::Start(0))?;
|
||||||
|
let (csum, size) = sha256(&mut tmpfile)?;
|
||||||
manifest.verify_file(name, &csum, size)?;
|
manifest.verify_file(name, &csum, size)?;
|
||||||
|
|
||||||
tmpfile.seek(SeekFrom::Start(0))?;
|
tmpfile.seek(SeekFrom::Start(0))?;
|
@ -3,12 +3,7 @@ use std::fmt;
|
|||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
|
|
||||||
use proxmox::api::schema::*;
|
use pbs_api_types::{BACKUP_REPO_URL_REGEX, IP_V6_REGEX, Authid, Userid};
|
||||||
|
|
||||||
use crate::api2::types::*;
|
|
||||||
|
|
||||||
/// API schema format definition for repository URLs
|
|
||||||
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
|
|
||||||
|
|
||||||
/// Reference remote backup locations
|
/// Reference remote backup locations
|
||||||
///
|
///
|
@ -1,12 +1,12 @@
|
|||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
use std::future::Future;
|
||||||
use std::os::unix::fs::OpenOptionsExt;
|
use std::os::unix::fs::OpenOptionsExt;
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::*;
|
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
|
||||||
use futures::stream::Stream;
|
use futures::stream::{Stream, StreamExt, TryStreamExt};
|
||||||
use futures::future::AbortHandle;
|
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::{mpsc, oneshot};
|
||||||
@ -14,11 +14,18 @@ use tokio_stream::wrappers::ReceiverStream;
|
|||||||
|
|
||||||
use proxmox::tools::digest_to_hex;
|
use proxmox::tools::digest_to_hex;
|
||||||
|
|
||||||
use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks};
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
use crate::backup::*;
|
use pbs_tools::format::HumanByte;
|
||||||
use crate::tools::format::HumanByte;
|
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
|
||||||
|
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
|
||||||
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
|
use pbs_datastore::fixed_index::FixedIndexReader;
|
||||||
|
use pbs_datastore::index::IndexFile;
|
||||||
|
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
|
||||||
|
|
||||||
use super::{HttpClient, H2Client};
|
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
|
||||||
|
|
||||||
|
use super::{H2Client, HttpClient};
|
||||||
|
|
||||||
pub struct BackupWriter {
|
pub struct BackupWriter {
|
||||||
h2: H2Client,
|
h2: H2Client,
|
||||||
@ -28,7 +35,6 @@ pub struct BackupWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for BackupWriter {
|
impl Drop for BackupWriter {
|
||||||
|
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.abort.abort();
|
self.abort.abort();
|
||||||
}
|
}
|
||||||
@ -48,13 +54,32 @@ pub struct UploadOptions {
|
|||||||
pub fixed_size: Option<u64>,
|
pub fixed_size: Option<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct UploadStats {
|
||||||
|
chunk_count: usize,
|
||||||
|
chunk_reused: usize,
|
||||||
|
size: usize,
|
||||||
|
size_reused: usize,
|
||||||
|
size_compressed: usize,
|
||||||
|
duration: std::time::Duration,
|
||||||
|
csum: [u8; 32],
|
||||||
|
}
|
||||||
|
|
||||||
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
|
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
|
||||||
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
|
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
|
||||||
|
|
||||||
impl BackupWriter {
|
impl BackupWriter {
|
||||||
|
fn new(
|
||||||
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>, verbose: bool) -> Arc<Self> {
|
h2: H2Client,
|
||||||
Arc::new(Self { h2, abort, crypt_config, verbose })
|
abort: AbortHandle,
|
||||||
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
|
verbose: bool,
|
||||||
|
) -> Arc<Self> {
|
||||||
|
Arc::new(Self {
|
||||||
|
h2,
|
||||||
|
abort,
|
||||||
|
crypt_config,
|
||||||
|
verbose,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: extract into (flattened) parameter struct?
|
// FIXME: extract into (flattened) parameter struct?
|
||||||
@ -67,9 +92,8 @@ impl BackupWriter {
|
|||||||
backup_id: &str,
|
backup_id: &str,
|
||||||
backup_time: i64,
|
backup_time: i64,
|
||||||
debug: bool,
|
debug: bool,
|
||||||
benchmark: bool
|
benchmark: bool,
|
||||||
) -> Result<Arc<BackupWriter>, Error> {
|
) -> Result<Arc<BackupWriter>, Error> {
|
||||||
|
|
||||||
let param = json!({
|
let param = json!({
|
||||||
"backup-type": backup_type,
|
"backup-type": backup_type,
|
||||||
"backup-id": backup_id,
|
"backup-id": backup_id,
|
||||||
@ -80,34 +104,30 @@ impl BackupWriter {
|
|||||||
});
|
});
|
||||||
|
|
||||||
let req = HttpClient::request_builder(
|
let req = HttpClient::request_builder(
|
||||||
client.server(), client.port(), "GET", "/api2/json/backup", Some(param)).unwrap();
|
client.server(),
|
||||||
|
client.port(),
|
||||||
|
"GET",
|
||||||
|
"/api2/json/backup",
|
||||||
|
Some(param),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let (h2, abort) = client.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!())).await?;
|
let (h2, abort) = client
|
||||||
|
.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
|
||||||
|
.await?;
|
||||||
|
|
||||||
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
|
Ok(BackupWriter::new(h2, abort, crypt_config, debug))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn get(
|
pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
|
||||||
&self,
|
|
||||||
path: &str,
|
|
||||||
param: Option<Value>,
|
|
||||||
) -> Result<Value, Error> {
|
|
||||||
self.h2.get(path, param).await
|
self.h2.get(path, param).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn put(
|
pub async fn put(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
|
||||||
&self,
|
|
||||||
path: &str,
|
|
||||||
param: Option<Value>,
|
|
||||||
) -> Result<Value, Error> {
|
|
||||||
self.h2.put(path, param).await
|
self.h2.put(path, param).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn post(
|
pub async fn post(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
|
||||||
&self,
|
|
||||||
path: &str,
|
|
||||||
param: Option<Value>,
|
|
||||||
) -> Result<Value, Error> {
|
|
||||||
self.h2.post(path, param).await
|
self.h2.post(path, param).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -118,7 +138,9 @@ impl BackupWriter {
|
|||||||
content_type: &str,
|
content_type: &str,
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
self.h2.upload("POST", path, param, content_type, data).await
|
self.h2
|
||||||
|
.upload("POST", path, param, content_type, data)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn send_upload_request(
|
pub async fn send_upload_request(
|
||||||
@ -129,9 +151,13 @@ impl BackupWriter {
|
|||||||
content_type: &str,
|
content_type: &str,
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
) -> Result<h2::client::ResponseFuture, Error> {
|
) -> Result<h2::client::ResponseFuture, Error> {
|
||||||
|
let request =
|
||||||
let request = H2Client::request_builder("localhost", method, path, param, Some(content_type)).unwrap();
|
H2Client::request_builder("localhost", method, path, param, Some(content_type))
|
||||||
let response_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
.unwrap();
|
||||||
|
let response_future = self
|
||||||
|
.h2
|
||||||
|
.send_request(request, Some(bytes::Bytes::from(data.clone())))
|
||||||
|
.await?;
|
||||||
Ok(response_future)
|
Ok(response_future)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +189,7 @@ impl BackupWriter {
|
|||||||
&self,
|
&self,
|
||||||
mut reader: R,
|
mut reader: R,
|
||||||
file_name: &str,
|
file_name: &str,
|
||||||
) -> Result<BackupStats, Error> {
|
) -> Result<BackupStats, Error> {
|
||||||
let mut raw_data = Vec::new();
|
let mut raw_data = Vec::new();
|
||||||
// fixme: avoid loading into memory
|
// fixme: avoid loading into memory
|
||||||
reader.read_to_end(&mut raw_data)?;
|
reader.read_to_end(&mut raw_data)?;
|
||||||
@ -171,7 +197,16 @@ impl BackupWriter {
|
|||||||
let csum = openssl::sha::sha256(&raw_data);
|
let csum = openssl::sha::sha256(&raw_data);
|
||||||
let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
|
let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
|
||||||
let size = raw_data.len() as u64;
|
let size = raw_data.len() as u64;
|
||||||
let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
|
let _value = self
|
||||||
|
.h2
|
||||||
|
.upload(
|
||||||
|
"POST",
|
||||||
|
"blob",
|
||||||
|
Some(param),
|
||||||
|
"application/octet-stream",
|
||||||
|
raw_data,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
Ok(BackupStats { size, csum })
|
Ok(BackupStats { size, csum })
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -182,9 +217,11 @@ impl BackupWriter {
|
|||||||
options: UploadOptions,
|
options: UploadOptions,
|
||||||
) -> Result<BackupStats, Error> {
|
) -> Result<BackupStats, Error> {
|
||||||
let blob = match (options.encrypt, &self.crypt_config) {
|
let blob = match (options.encrypt, &self.crypt_config) {
|
||||||
(false, _) => DataBlob::encode(&data, None, options.compress)?,
|
(false, _) => DataBlob::encode(&data, None, options.compress)?,
|
||||||
(true, None) => bail!("requested encryption without a crypt config"),
|
(true, None) => bail!("requested encryption without a crypt config"),
|
||||||
(true, Some(crypt_config)) => DataBlob::encode(&data, Some(crypt_config), options.compress)?,
|
(true, Some(crypt_config)) => {
|
||||||
|
DataBlob::encode(&data, Some(crypt_config), options.compress)?
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let raw_data = blob.into_inner();
|
let raw_data = blob.into_inner();
|
||||||
@ -192,7 +229,16 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let csum = openssl::sha::sha256(&raw_data);
|
let csum = openssl::sha::sha256(&raw_data);
|
||||||
let param = json!({"encoded-size": size, "file-name": file_name });
|
let param = json!({"encoded-size": size, "file-name": file_name });
|
||||||
let _value = self.h2.upload("POST", "blob", Some(param), "application/octet-stream", raw_data).await?;
|
let _value = self
|
||||||
|
.h2
|
||||||
|
.upload(
|
||||||
|
"POST",
|
||||||
|
"blob",
|
||||||
|
Some(param),
|
||||||
|
"application/octet-stream",
|
||||||
|
raw_data,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
Ok(BackupStats { size, csum })
|
Ok(BackupStats { size, csum })
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -202,7 +248,6 @@ impl BackupWriter {
|
|||||||
file_name: &str,
|
file_name: &str,
|
||||||
options: UploadOptions,
|
options: UploadOptions,
|
||||||
) -> Result<BackupStats, Error> {
|
) -> Result<BackupStats, Error> {
|
||||||
|
|
||||||
let src_path = src_path.as_ref();
|
let src_path = src_path.as_ref();
|
||||||
|
|
||||||
let mut file = tokio::fs::File::open(src_path)
|
let mut file = tokio::fs::File::open(src_path)
|
||||||
@ -215,7 +260,8 @@ impl BackupWriter {
|
|||||||
.await
|
.await
|
||||||
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
|
.map_err(|err| format_err!("unable to read file {:?} - {}", src_path, err))?;
|
||||||
|
|
||||||
self.upload_blob_from_data(contents, file_name, options).await
|
self.upload_blob_from_data(contents, file_name, options)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upload_stream(
|
pub async fn upload_stream(
|
||||||
@ -243,74 +289,120 @@ impl BackupWriter {
|
|||||||
|
|
||||||
if let Some(manifest) = options.previous_manifest {
|
if let Some(manifest) = options.previous_manifest {
|
||||||
// try, but ignore errors
|
// try, but ignore errors
|
||||||
match archive_type(archive_name) {
|
match ArchiveType::from_path(archive_name) {
|
||||||
Ok(ArchiveType::FixedIndex) => {
|
Ok(ArchiveType::FixedIndex) => {
|
||||||
let _ = self.download_previous_fixed_index(archive_name, &manifest, known_chunks.clone()).await;
|
let _ = self
|
||||||
|
.download_previous_fixed_index(
|
||||||
|
archive_name,
|
||||||
|
&manifest,
|
||||||
|
known_chunks.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
Ok(ArchiveType::DynamicIndex) => {
|
Ok(ArchiveType::DynamicIndex) => {
|
||||||
let _ = self.download_previous_dynamic_index(archive_name, &manifest, known_chunks.clone()).await;
|
let _ = self
|
||||||
|
.download_previous_dynamic_index(
|
||||||
|
archive_name,
|
||||||
|
&manifest,
|
||||||
|
known_chunks.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
_ => { /* do nothing */ }
|
_ => { /* do nothing */ }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
|
let wid = self
|
||||||
|
.h2
|
||||||
|
.post(&index_path, Some(param))
|
||||||
|
.await?
|
||||||
|
.as_u64()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
|
let upload_stats = Self::upload_chunk_info_stream(
|
||||||
Self::upload_chunk_info_stream(
|
self.h2.clone(),
|
||||||
self.h2.clone(),
|
wid,
|
||||||
wid,
|
stream,
|
||||||
stream,
|
&prefix,
|
||||||
&prefix,
|
known_chunks.clone(),
|
||||||
known_chunks.clone(),
|
if options.encrypt {
|
||||||
if options.encrypt { self.crypt_config.clone() } else { None },
|
self.crypt_config.clone()
|
||||||
options.compress,
|
} else {
|
||||||
self.verbose,
|
None
|
||||||
)
|
},
|
||||||
.await?;
|
options.compress,
|
||||||
|
self.verbose,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let uploaded = size - size_reused;
|
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
||||||
let vsize_h: HumanByte = size.into();
|
let size: HumanByte = upload_stats.size.into();
|
||||||
let archive = if self.verbose {
|
let archive = if self.verbose {
|
||||||
archive_name.to_string()
|
archive_name.to_string()
|
||||||
} else {
|
} else {
|
||||||
crate::tools::format::strip_server_file_extension(archive_name)
|
pbs_tools::format::strip_server_file_extension(archive_name)
|
||||||
};
|
};
|
||||||
if archive_name != CATALOG_NAME {
|
if archive_name != CATALOG_NAME {
|
||||||
let speed: HumanByte = ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
|
let speed: HumanByte =
|
||||||
let uploaded: HumanByte = uploaded.into();
|
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
|
||||||
println!("{}: had to upload {} of {} in {:.2}s, average speed {}/s).", archive, uploaded, vsize_h, duration.as_secs_f64(), speed);
|
let size_dirty: HumanByte = size_dirty.into();
|
||||||
|
let size_compressed: HumanByte = upload_stats.size_compressed.into();
|
||||||
|
println!(
|
||||||
|
"{}: had to backup {} of {} (compressed {}) in {:.2}s",
|
||||||
|
archive,
|
||||||
|
size_dirty,
|
||||||
|
size,
|
||||||
|
size_compressed,
|
||||||
|
upload_stats.duration.as_secs_f64()
|
||||||
|
);
|
||||||
|
println!("{}: average backup speed: {}/s", archive, speed);
|
||||||
} else {
|
} else {
|
||||||
println!("Uploaded backup catalog ({})", vsize_h);
|
println!("Uploaded backup catalog ({})", size);
|
||||||
}
|
}
|
||||||
|
|
||||||
if size_reused > 0 && size > 1024*1024 {
|
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
|
||||||
let reused_percent = size_reused as f64 * 100. / size as f64;
|
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
|
||||||
let reused: HumanByte = size_reused.into();
|
let reused: HumanByte = upload_stats.size_reused.into();
|
||||||
println!("{}: backup was done incrementally, reused {} ({:.1}%)", archive, reused, reused_percent);
|
println!(
|
||||||
|
"{}: backup was done incrementally, reused {} ({:.1}%)",
|
||||||
|
archive, reused, reused_percent
|
||||||
|
);
|
||||||
}
|
}
|
||||||
if self.verbose && chunk_count > 0 {
|
if self.verbose && upload_stats.chunk_count > 0 {
|
||||||
println!("{}: Reused {} from {} chunks.", archive, chunk_reused, chunk_count);
|
println!(
|
||||||
println!("{}: Average chunk size was {}.", archive, HumanByte::from(size/chunk_count));
|
"{}: Reused {} from {} chunks.",
|
||||||
println!("{}: Average time per request: {} microseconds.", archive, (duration.as_micros())/(chunk_count as u128));
|
archive, upload_stats.chunk_reused, upload_stats.chunk_count
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
"{}: Average chunk size was {}.",
|
||||||
|
archive,
|
||||||
|
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
"{}: Average time per request: {} microseconds.",
|
||||||
|
archive,
|
||||||
|
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let param = json!({
|
let param = json!({
|
||||||
"wid": wid ,
|
"wid": wid ,
|
||||||
"chunk-count": chunk_count,
|
"chunk-count": upload_stats.chunk_count,
|
||||||
"size": size,
|
"size": upload_stats.size,
|
||||||
"csum": proxmox::tools::digest_to_hex(&csum),
|
"csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
|
||||||
});
|
});
|
||||||
let _value = self.h2.post(&close_path, Some(param)).await?;
|
let _value = self.h2.post(&close_path, Some(param)).await?;
|
||||||
Ok(BackupStats {
|
Ok(BackupStats {
|
||||||
size: size as u64,
|
size: upload_stats.size as u64,
|
||||||
csum,
|
csum: upload_stats.csum,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn response_queue(verbose: bool) -> (
|
fn response_queue(
|
||||||
|
verbose: bool,
|
||||||
|
) -> (
|
||||||
mpsc::Sender<h2::client::ResponseFuture>,
|
mpsc::Sender<h2::client::ResponseFuture>,
|
||||||
oneshot::Receiver<Result<(), Error>>
|
oneshot::Receiver<Result<(), Error>>,
|
||||||
) {
|
) {
|
||||||
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
|
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
|
||||||
let (verify_result_tx, verify_result_rx) = oneshot::channel();
|
let (verify_result_tx, verify_result_rx) = oneshot::channel();
|
||||||
@ -336,12 +428,16 @@ impl BackupWriter {
|
|||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
.map_ok(move |result| if verbose { println!("RESPONSE: {:?}", result) })
|
.map_ok(move |result| {
|
||||||
|
if verbose {
|
||||||
|
println!("RESPONSE: {:?}", result)
|
||||||
|
}
|
||||||
|
})
|
||||||
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
||||||
})
|
})
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
let _ignore_closed_channel = verify_result_tx.send(result);
|
let _ignore_closed_channel = verify_result_tx.send(result);
|
||||||
})
|
}),
|
||||||
);
|
);
|
||||||
|
|
||||||
(verify_queue_tx, verify_result_rx)
|
(verify_queue_tx, verify_result_rx)
|
||||||
@ -363,7 +459,7 @@ impl BackupWriter {
|
|||||||
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
|
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
|
||||||
match (response, merged_chunk_info) {
|
match (response, merged_chunk_info) {
|
||||||
(Some(response), MergedChunkInfo::Known(list)) => {
|
(Some(response), MergedChunkInfo::Known(list)) => {
|
||||||
future::Either::Left(
|
Either::Left(
|
||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
@ -373,7 +469,7 @@ impl BackupWriter {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
(None, MergedChunkInfo::Known(list)) => {
|
(None, MergedChunkInfo::Known(list)) => {
|
||||||
future::Either::Right(future::ok(MergedChunkInfo::Known(list)))
|
Either::Right(future::ok(MergedChunkInfo::Known(list)))
|
||||||
}
|
}
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
}
|
}
|
||||||
@ -418,9 +514,8 @@ impl BackupWriter {
|
|||||||
&self,
|
&self,
|
||||||
archive_name: &str,
|
archive_name: &str,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
) -> Result<FixedIndexReader, Error> {
|
) -> Result<FixedIndexReader, Error> {
|
||||||
|
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = std::fs::OpenOptions::new()
|
||||||
.write(true)
|
.write(true)
|
||||||
.read(true)
|
.read(true)
|
||||||
@ -428,10 +523,13 @@ impl BackupWriter {
|
|||||||
.open("/tmp")?;
|
.open("/tmp")?;
|
||||||
|
|
||||||
let param = json!({ "archive-name": archive_name });
|
let param = json!({ "archive-name": archive_name });
|
||||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
self.h2
|
||||||
|
.download("previous", Some(param), &mut tmpfile)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let index = FixedIndexReader::new(tmpfile)
|
let index = FixedIndexReader::new(tmpfile).map_err(|err| {
|
||||||
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
|
format_err!("unable to read fixed index '{}' - {}", archive_name, err)
|
||||||
|
})?;
|
||||||
// Note: do not use values stored in index (not trusted) - instead, computed them again
|
// Note: do not use values stored in index (not trusted) - instead, computed them again
|
||||||
let (csum, size) = index.compute_csum();
|
let (csum, size) = index.compute_csum();
|
||||||
manifest.verify_file(archive_name, &csum, size)?;
|
manifest.verify_file(archive_name, &csum, size)?;
|
||||||
@ -443,7 +541,11 @@ impl BackupWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.verbose {
|
if self.verbose {
|
||||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
println!(
|
||||||
|
"{}: known chunks list length is {}",
|
||||||
|
archive_name,
|
||||||
|
index.index_count()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(index)
|
Ok(index)
|
||||||
@ -453,9 +555,8 @@ impl BackupWriter {
|
|||||||
&self,
|
&self,
|
||||||
archive_name: &str,
|
archive_name: &str,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
) -> Result<DynamicIndexReader, Error> {
|
) -> Result<DynamicIndexReader, Error> {
|
||||||
|
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = std::fs::OpenOptions::new()
|
||||||
.write(true)
|
.write(true)
|
||||||
.read(true)
|
.read(true)
|
||||||
@ -463,10 +564,13 @@ impl BackupWriter {
|
|||||||
.open("/tmp")?;
|
.open("/tmp")?;
|
||||||
|
|
||||||
let param = json!({ "archive-name": archive_name });
|
let param = json!({ "archive-name": archive_name });
|
||||||
self.h2.download("previous", Some(param), &mut tmpfile).await?;
|
self.h2
|
||||||
|
.download("previous", Some(param), &mut tmpfile)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let index = DynamicIndexReader::new(tmpfile)
|
let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
|
||||||
.map_err(|err| format_err!("unable to read dynmamic index '{}' - {}", archive_name, err))?;
|
format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
|
||||||
|
})?;
|
||||||
// Note: do not use values stored in index (not trusted) - instead, computed them again
|
// Note: do not use values stored in index (not trusted) - instead, computed them again
|
||||||
let (csum, size) = index.compute_csum();
|
let (csum, size) = index.compute_csum();
|
||||||
manifest.verify_file(archive_name, &csum, size)?;
|
manifest.verify_file(archive_name, &csum, size)?;
|
||||||
@ -478,7 +582,11 @@ impl BackupWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if self.verbose {
|
if self.verbose {
|
||||||
println!("{}: known chunks list length is {}", archive_name, index.index_count());
|
println!(
|
||||||
|
"{}: known chunks list length is {}",
|
||||||
|
archive_name,
|
||||||
|
index.index_count()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(index)
|
Ok(index)
|
||||||
@ -487,23 +595,29 @@ impl BackupWriter {
|
|||||||
/// Retrieve backup time of last backup
|
/// Retrieve backup time of last backup
|
||||||
pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
|
pub async fn previous_backup_time(&self) -> Result<Option<i64>, Error> {
|
||||||
let data = self.h2.get("previous_backup_time", None).await?;
|
let data = self.h2.get("previous_backup_time", None).await?;
|
||||||
serde_json::from_value(data)
|
serde_json::from_value(data).map_err(|err| {
|
||||||
.map_err(|err| format_err!("Failed to parse backup time value returned by server - {}", err))
|
format_err!(
|
||||||
|
"Failed to parse backup time value returned by server - {}",
|
||||||
|
err
|
||||||
|
)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Download backup manifest (index.json) of last backup
|
/// Download backup manifest (index.json) of last backup
|
||||||
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
||||||
|
|
||||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||||
|
|
||||||
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
||||||
self.h2.download("previous", Some(param), &mut raw_data).await?;
|
self.h2
|
||||||
|
.download("previous", Some(param), &mut raw_data)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
|
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
|
||||||
// no expected digest available
|
// no expected digest available
|
||||||
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
|
let data = blob.decode(self.crypt_config.as_ref().map(Arc::as_ref), None)?;
|
||||||
|
|
||||||
let manifest = BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
|
let manifest =
|
||||||
|
BackupManifest::from_data(&data[..], self.crypt_config.as_ref().map(Arc::as_ref))?;
|
||||||
|
|
||||||
Ok(manifest)
|
Ok(manifest)
|
||||||
}
|
}
|
||||||
@ -517,12 +631,11 @@ impl BackupWriter {
|
|||||||
wid: u64,
|
wid: u64,
|
||||||
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
||||||
prefix: &str,
|
prefix: &str,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
crypt_config: Option<Arc<CryptConfig>>,
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
compress: bool,
|
compress: bool,
|
||||||
verbose: bool,
|
verbose: bool,
|
||||||
) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>> {
|
) -> impl Future<Output = Result<UploadStats, Error>> {
|
||||||
|
|
||||||
let total_chunks = Arc::new(AtomicUsize::new(0));
|
let total_chunks = Arc::new(AtomicUsize::new(0));
|
||||||
let total_chunks2 = total_chunks.clone();
|
let total_chunks2 = total_chunks.clone();
|
||||||
let known_chunk_count = Arc::new(AtomicUsize::new(0));
|
let known_chunk_count = Arc::new(AtomicUsize::new(0));
|
||||||
@ -530,6 +643,8 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let stream_len = Arc::new(AtomicUsize::new(0));
|
let stream_len = Arc::new(AtomicUsize::new(0));
|
||||||
let stream_len2 = stream_len.clone();
|
let stream_len2 = stream_len.clone();
|
||||||
|
let compressed_stream_len = Arc::new(AtomicU64::new(0));
|
||||||
|
let compressed_stream_len2 = compressed_stream_len.clone();
|
||||||
let reused_len = Arc::new(AtomicUsize::new(0));
|
let reused_len = Arc::new(AtomicUsize::new(0));
|
||||||
let reused_len2 = reused_len.clone();
|
let reused_len2 = reused_len.clone();
|
||||||
|
|
||||||
@ -547,14 +662,12 @@ impl BackupWriter {
|
|||||||
|
|
||||||
stream
|
stream
|
||||||
.and_then(move |data| {
|
.and_then(move |data| {
|
||||||
|
|
||||||
let chunk_len = data.len();
|
let chunk_len = data.len();
|
||||||
|
|
||||||
total_chunks.fetch_add(1, Ordering::SeqCst);
|
total_chunks.fetch_add(1, Ordering::SeqCst);
|
||||||
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
||||||
|
|
||||||
let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
|
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
|
||||||
.compress(compress);
|
|
||||||
|
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(ref crypt_config) = crypt_config {
|
||||||
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
chunk_builder = chunk_builder.crypt_config(crypt_config);
|
||||||
@ -568,7 +681,9 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let chunk_end = offset + chunk_len as u64;
|
let chunk_end = offset + chunk_len as u64;
|
||||||
|
|
||||||
if !is_fixed_chunk_size { csum.update(&chunk_end.to_le_bytes()); }
|
if !is_fixed_chunk_size {
|
||||||
|
csum.update(&chunk_end.to_le_bytes());
|
||||||
|
}
|
||||||
csum.update(digest);
|
csum.update(digest);
|
||||||
|
|
||||||
let chunk_is_known = known_chunks.contains(digest);
|
let chunk_is_known = known_chunks.contains(digest);
|
||||||
@ -577,16 +692,17 @@ impl BackupWriter {
|
|||||||
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
|
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
|
||||||
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
|
||||||
} else {
|
} else {
|
||||||
|
let compressed_stream_len2 = compressed_stream_len.clone();
|
||||||
known_chunks.insert(*digest);
|
known_chunks.insert(*digest);
|
||||||
future::ready(chunk_builder
|
future::ready(chunk_builder.build().map(move |(chunk, digest)| {
|
||||||
.build()
|
compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
|
||||||
.map(move |(chunk, digest)| MergedChunkInfo::New(ChunkInfo {
|
MergedChunkInfo::New(ChunkInfo {
|
||||||
chunk,
|
chunk,
|
||||||
digest,
|
digest,
|
||||||
chunk_len: chunk_len as u64,
|
chunk_len: chunk_len as u64,
|
||||||
offset,
|
offset,
|
||||||
}))
|
})
|
||||||
)
|
}))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.merge_known_chunks()
|
.merge_known_chunks()
|
||||||
@ -614,22 +730,30 @@ impl BackupWriter {
|
|||||||
});
|
});
|
||||||
|
|
||||||
let ct = "application/octet-stream";
|
let ct = "application/octet-stream";
|
||||||
let request = H2Client::request_builder("localhost", "POST", &upload_chunk_path, Some(param), Some(ct)).unwrap();
|
let request = H2Client::request_builder(
|
||||||
|
"localhost",
|
||||||
|
"POST",
|
||||||
|
&upload_chunk_path,
|
||||||
|
Some(param),
|
||||||
|
Some(ct),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
let upload_data = Some(bytes::Bytes::from(chunk_data));
|
let upload_data = Some(bytes::Bytes::from(chunk_data));
|
||||||
|
|
||||||
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
|
let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);
|
||||||
|
|
||||||
future::Either::Left(h2
|
Either::Left(h2.send_request(request, upload_data).and_then(
|
||||||
.send_request(request, upload_data)
|
move |response| async move {
|
||||||
.and_then(move |response| async move {
|
|
||||||
upload_queue
|
upload_queue
|
||||||
.send((new_info, Some(response)))
|
.send((new_info, Some(response)))
|
||||||
.await
|
.await
|
||||||
.map_err(|err| format_err!("failed to send to upload queue: {}", err))
|
.map_err(|err| {
|
||||||
})
|
format_err!("failed to send to upload queue: {}", err)
|
||||||
)
|
})
|
||||||
|
},
|
||||||
|
))
|
||||||
} else {
|
} else {
|
||||||
future::Either::Right(async move {
|
Either::Right(async move {
|
||||||
upload_queue
|
upload_queue
|
||||||
.send((merged_chunk_info, None))
|
.send((merged_chunk_info, None))
|
||||||
.await
|
.await
|
||||||
@ -637,31 +761,37 @@ impl BackupWriter {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.then(move |result| async move {
|
.then(move |result| async move { upload_result.await?.and(result) }.boxed())
|
||||||
upload_result.await?.and(result)
|
|
||||||
}.boxed())
|
|
||||||
.and_then(move |_| {
|
.and_then(move |_| {
|
||||||
let duration = start_time.elapsed();
|
let duration = start_time.elapsed();
|
||||||
let total_chunks = total_chunks2.load(Ordering::SeqCst);
|
let chunk_count = total_chunks2.load(Ordering::SeqCst);
|
||||||
let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
|
let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
|
||||||
let stream_len = stream_len2.load(Ordering::SeqCst);
|
let size = stream_len2.load(Ordering::SeqCst);
|
||||||
let reused_len = reused_len2.load(Ordering::SeqCst);
|
let size_reused = reused_len2.load(Ordering::SeqCst);
|
||||||
|
let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
|
||||||
|
|
||||||
let mut guard = index_csum_2.lock().unwrap();
|
let mut guard = index_csum_2.lock().unwrap();
|
||||||
let csum = guard.take().unwrap().finish();
|
let csum = guard.take().unwrap().finish();
|
||||||
|
|
||||||
futures::future::ok((total_chunks, known_chunk_count, stream_len, reused_len, duration, csum))
|
futures::future::ok(UploadStats {
|
||||||
|
chunk_count,
|
||||||
|
chunk_reused,
|
||||||
|
size,
|
||||||
|
size_reused,
|
||||||
|
size_compressed,
|
||||||
|
duration,
|
||||||
|
csum,
|
||||||
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Upload speed test - prints result to stderr
|
/// Upload speed test - prints result to stderr
|
||||||
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
|
pub async fn upload_speedtest(&self, verbose: bool) -> Result<f64, Error> {
|
||||||
|
|
||||||
let mut data = vec![];
|
let mut data = vec![];
|
||||||
// generate pseudo random byte sequence
|
// generate pseudo random byte sequence
|
||||||
for i in 0..1024*1024 {
|
for i in 0..1024 * 1024 {
|
||||||
for j in 0..4 {
|
for j in 0..4 {
|
||||||
let byte = ((i >> (j<<3))&0xff) as u8;
|
let byte = ((i >> (j << 3)) & 0xff) as u8;
|
||||||
data.push(byte);
|
data.push(byte);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -680,9 +810,15 @@ impl BackupWriter {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if verbose { eprintln!("send test data ({} bytes)", data.len()); }
|
if verbose {
|
||||||
let request = H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
eprintln!("send test data ({} bytes)", data.len());
|
||||||
let request_future = self.h2.send_request(request, Some(bytes::Bytes::from(data.clone()))).await?;
|
}
|
||||||
|
let request =
|
||||||
|
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
|
||||||
|
let request_future = self
|
||||||
|
.h2
|
||||||
|
.send_request(request, Some(bytes::Bytes::from(data.clone())))
|
||||||
|
.await?;
|
||||||
|
|
||||||
upload_queue.send(request_future).await?;
|
upload_queue.send(request_future).await?;
|
||||||
}
|
}
|
||||||
@ -691,9 +827,16 @@ impl BackupWriter {
|
|||||||
|
|
||||||
let _ = upload_result.await?;
|
let _ = upload_result.await?;
|
||||||
|
|
||||||
eprintln!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
|
eprintln!(
|
||||||
let speed = ((item_len*(repeat as usize)) as f64)/start_time.elapsed().as_secs_f64();
|
"Uploaded {} chunks in {} seconds.",
|
||||||
eprintln!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
|
repeat,
|
||||||
|
start_time.elapsed().as_secs()
|
||||||
|
);
|
||||||
|
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
|
||||||
|
eprintln!(
|
||||||
|
"Time per request: {} microseconds.",
|
||||||
|
(start_time.elapsed().as_micros()) / (repeat as u128)
|
||||||
|
);
|
||||||
|
|
||||||
Ok(speed)
|
Ok(speed)
|
||||||
}
|
}
|
@ -18,12 +18,14 @@ use proxmox::api::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineI
|
|||||||
use proxmox::tools::fs::{create_path, CreateOptions};
|
use proxmox::tools::fs::{create_path, CreateOptions};
|
||||||
use pxar::{EntryKind, Metadata};
|
use pxar::{EntryKind, Metadata};
|
||||||
|
|
||||||
use crate::backup::catalog::{self, DirEntryAttribute};
|
use pbs_runtime::block_in_place;
|
||||||
|
use pbs_datastore::catalog::{self, DirEntryAttribute};
|
||||||
|
use pbs_tools::ops::ControlFlow;
|
||||||
|
|
||||||
use crate::pxar::Flags;
|
use crate::pxar::Flags;
|
||||||
use crate::pxar::fuse::{Accessor, FileEntry};
|
use crate::pxar::fuse::{Accessor, FileEntry};
|
||||||
use crate::tools::runtime::block_in_place;
|
|
||||||
|
|
||||||
type CatalogReader = crate::backup::CatalogReader<std::fs::File>;
|
type CatalogReader = pbs_datastore::catalog::CatalogReader<std::fs::File>;
|
||||||
|
|
||||||
const MAX_SYMLINK_COUNT: usize = 40;
|
const MAX_SYMLINK_COUNT: usize = 40;
|
||||||
|
|
||||||
@ -77,13 +79,13 @@ pub fn catalog_shell_cli() -> CommandLineInterface {
|
|||||||
"restore-selected",
|
"restore-selected",
|
||||||
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
|
CliCommand::new(&API_METHOD_RESTORE_SELECTED_COMMAND)
|
||||||
.arg_param(&["target"])
|
.arg_param(&["target"])
|
||||||
.completion_cb("target", crate::tools::complete_file_name),
|
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||||
)
|
)
|
||||||
.insert(
|
.insert(
|
||||||
"restore",
|
"restore",
|
||||||
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
|
CliCommand::new(&API_METHOD_RESTORE_COMMAND)
|
||||||
.arg_param(&["target"])
|
.arg_param(&["target"])
|
||||||
.completion_cb("target", crate::tools::complete_file_name),
|
.completion_cb("target", pbs_tools::fs::complete_file_name),
|
||||||
)
|
)
|
||||||
.insert(
|
.insert(
|
||||||
"find",
|
"find",
|
||||||
@ -984,7 +986,8 @@ impl Shell {
|
|||||||
.metadata()
|
.metadata()
|
||||||
.clone();
|
.clone();
|
||||||
|
|
||||||
let extractor = crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
|
let extractor =
|
||||||
|
crate::pxar::extract::Extractor::new(rootdir, root_meta, true, Flags::DEFAULT);
|
||||||
|
|
||||||
let mut extractor = ExtractorState::new(
|
let mut extractor = ExtractorState::new(
|
||||||
&mut self.catalog,
|
&mut self.catalog,
|
||||||
@ -998,11 +1001,6 @@ impl Shell {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
enum LoopState {
|
|
||||||
Break,
|
|
||||||
Continue,
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ExtractorState<'a> {
|
struct ExtractorState<'a> {
|
||||||
path: Vec<u8>,
|
path: Vec<u8>,
|
||||||
path_len: usize,
|
path_len: usize,
|
||||||
@ -1060,8 +1058,8 @@ impl<'a> ExtractorState<'a> {
|
|||||||
let entry = match self.read_dir.next() {
|
let entry = match self.read_dir.next() {
|
||||||
Some(entry) => entry,
|
Some(entry) => entry,
|
||||||
None => match self.handle_end_of_directory()? {
|
None => match self.handle_end_of_directory()? {
|
||||||
LoopState::Break => break, // done with root directory
|
ControlFlow::Break(()) => break, // done with root directory
|
||||||
LoopState::Continue => continue,
|
ControlFlow::Continue(()) => continue,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1079,11 +1077,11 @@ impl<'a> ExtractorState<'a> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_end_of_directory(&mut self) -> Result<LoopState, Error> {
|
fn handle_end_of_directory(&mut self) -> Result<ControlFlow<()>, Error> {
|
||||||
// go up a directory:
|
// go up a directory:
|
||||||
self.read_dir = match self.read_dir_stack.pop() {
|
self.read_dir = match self.read_dir_stack.pop() {
|
||||||
Some(r) => r,
|
Some(r) => r,
|
||||||
None => return Ok(LoopState::Break), // out of root directory
|
None => return Ok(ControlFlow::Break(())), // out of root directory
|
||||||
};
|
};
|
||||||
|
|
||||||
self.matches = self
|
self.matches = self
|
||||||
@ -1102,7 +1100,7 @@ impl<'a> ExtractorState<'a> {
|
|||||||
|
|
||||||
self.extractor.leave_directory()?;
|
self.extractor.leave_directory()?;
|
||||||
|
|
||||||
Ok(LoopState::Continue)
|
Ok(ControlFlow::CONTINUE)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_new_directory(
|
async fn handle_new_directory(
|
@ -6,7 +6,7 @@ use anyhow::{Error};
|
|||||||
use futures::ready;
|
use futures::ready;
|
||||||
use futures::stream::{Stream, TryStream};
|
use futures::stream::{Stream, TryStream};
|
||||||
|
|
||||||
use super::Chunker;
|
use pbs_datastore::Chunker;
|
||||||
|
|
||||||
/// Split input stream into dynamic sized chunks
|
/// Split input stream into dynamic sized chunks
|
||||||
pub struct ChunkStream<S: Unpin> {
|
pub struct ChunkStream<S: Unpin> {
|